Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
  136  __all__ = ['DAL', 'Field'] 
  137   
  138  DEFAULTLENGTH = {'string':512, 
  139                   'password':512, 
  140                   'upload':512, 
  141                   'text':2**15, 
  142                   'blob':2**31} 
  143  TIMINGSSIZE = 100 
  144  SPATIALLIBS = { 
  145      'Windows':'libspatialite', 
  146      'Linux':'libspatialite.so', 
  147      'Darwin':'libspatialite.dylib' 
  148      } 
  149  DEFAULT_URI = 'sqlite://dummy.db' 
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  192                   types.BuiltinFunctionType, 
  193                   types.MethodType, types.BuiltinMethodType) 
  194   
  195  TABLE_ARGS = set( 
  196      ('migrate','primarykey','fake_migrate','format','redefine', 
  197       'singular','plural','trigger_name','sequence_name','fields', 
  198       'common_filter','polymodel','table_class','on_define','actual_name')) 
  199   
  200  SELECT_ARGS = set( 
  201      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  202       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  203   
  204  ogetattr = object.__getattribute__ 
  205  osetattr = object.__setattr__ 
  206  exists = os.path.exists 
  207  pjoin = os.path.join 
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from gluon.utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
216 - def web2py_uuid(): return str(uuid.uuid4())
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 from gluon import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 LOGGER = logging.getLogger("web2py.dal") 238 DEFAULT = lambda:0 239 240 GLOBAL_LOCKER = threading.RLock() 241 THREAD_LOCAL = threading.local() 242 243 # internal representation of tables with field 244 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 245 246 REGEX_TYPE = re.compile('^([\w\_\:]+)') 247 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 248 REGEX_W = re.compile('^\w+$') 249 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 250 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$') 251 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 252 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 253 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 254 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 255 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 256 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 257 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 258 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 259 REGEX_QUOTES = re.compile("'[^']*'") 260 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 261 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 262 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 263 264 # list of drivers will be built on the fly 265 # and lists 
only what is available 266 DRIVERS = [] 267 268 try: 269 from new import classobj 270 from google.appengine.ext import db as gae 271 from google.appengine.api import namespace_manager, rdbms 272 from google.appengine.api.datastore_types import Key ### for belongs on ID 273 from google.appengine.ext.db.polymodel import PolyModel 274 DRIVERS.append('google') 275 except ImportError: 276 pass 277 278 if not 'google' in DRIVERS: 279 280 try: 281 from pysqlite2 import dbapi2 as sqlite2 282 DRIVERS.append('SQLite(sqlite2)') 283 except ImportError: 284 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 285 286 try: 287 from sqlite3 import dbapi2 as sqlite3 288 DRIVERS.append('SQLite(sqlite3)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers sqlite3') 291 292 try: 293 # first try contrib driver, then from site-packages (if installed) 294 try: 295 import gluon.contrib.pymysql as pymysql 296 # monkeypatch pymysql because they havent fixed the bug: 297 # https://github.com/petehunt/PyMySQL/issues/86 298 pymysql.ESCAPE_REGEX = re.compile("'") 299 pymysql.ESCAPE_MAP = {"'": "''"} 300 # end monkeypatch 301 except ImportError: 302 import pymysql 303 DRIVERS.append('MySQL(pymysql)') 304 except ImportError: 305 LOGGER.debug('no MySQL driver pymysql') 306 307 try: 308 import MySQLdb 309 DRIVERS.append('MySQL(MySQLdb)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver MySQLDB') 312 313 try: 314 import mysql.connector as mysqlconnector 315 DRIVERS.append("MySQL(mysqlconnector)") 316 except ImportError: 317 LOGGER.debug("no driver mysql.connector") 318 319 try: 320 import psycopg2 321 from psycopg2.extensions import adapt as psycopg2_adapt 322 DRIVERS.append('PostgreSQL(psycopg2)') 323 except ImportError: 324 LOGGER.debug('no PostgreSQL driver psycopg2') 325 326 try: 327 # first try contrib driver, then from site-packages (if installed) 328 try: 329 import gluon.contrib.pg8000.dbapi as pg8000 330 except ImportError: 331 import pg8000.dbapi as pg8000 332 
DRIVERS.append('PostgreSQL(pg8000)') 333 except ImportError: 334 LOGGER.debug('no PostgreSQL driver pg8000') 335 336 try: 337 import cx_Oracle 338 DRIVERS.append('Oracle(cx_Oracle)') 339 except ImportError: 340 LOGGER.debug('no Oracle driver cx_Oracle') 341 342 try: 343 try: 344 import pyodbc 345 except ImportError: 346 try: 347 import gluon.contrib.pypyodbc as pyodbc 348 except Exception, e: 349 raise ImportError(str(e)) 350 DRIVERS.append('MSSQL(pyodbc)') 351 DRIVERS.append('DB2(pyodbc)') 352 DRIVERS.append('Teradata(pyodbc)') 353 DRIVERS.append('Ingres(pyodbc)') 354 except ImportError: 355 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 356 357 try: 358 import Sybase 359 DRIVERS.append('Sybase(Sybase)') 360 except ImportError: 361 LOGGER.debug('no Sybase driver') 362 363 try: 364 import kinterbasdb 365 DRIVERS.append('Interbase(kinterbasdb)') 366 DRIVERS.append('Firebird(kinterbasdb)') 367 except ImportError: 368 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 369 370 try: 371 import fdb 372 DRIVERS.append('Firebird(fdb)') 373 except ImportError: 374 LOGGER.debug('no Firebird driver fdb') 375 ##### 376 try: 377 import firebirdsql 378 DRIVERS.append('Firebird(firebirdsql)') 379 except ImportError: 380 LOGGER.debug('no Firebird driver firebirdsql') 381 382 try: 383 import informixdb 384 DRIVERS.append('Informix(informixdb)') 385 LOGGER.warning('Informix support is experimental') 386 except ImportError: 387 LOGGER.debug('no Informix driver informixdb') 388 389 try: 390 import sapdb 391 DRIVERS.append('SQL(sapdb)') 392 LOGGER.warning('SAPDB support is experimental') 393 except ImportError: 394 LOGGER.debug('no SAP driver sapdb') 395 396 try: 397 import cubriddb 398 DRIVERS.append('Cubrid(cubriddb)') 399 LOGGER.warning('Cubrid support is experimental') 400 except ImportError: 401 LOGGER.debug('no Cubrid driver cubriddb') 402 403 try: 404 from com.ziclix.python.sql import zxJDBC 405 import java.sql 406 # Try sqlite jdbc driver from 
http://www.zentus.com/sqlitejdbc/ 407 from org.sqlite import JDBC # required by java.sql; ensure we have it 408 zxJDBC_sqlite = java.sql.DriverManager 409 DRIVERS.append('PostgreSQL(zxJDBC)') 410 DRIVERS.append('SQLite(zxJDBC)') 411 LOGGER.warning('zxJDBC support is experimental') 412 is_jdbc = True 413 except ImportError: 414 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 415 is_jdbc = False 416 417 try: 418 import couchdb 419 DRIVERS.append('CouchDB(couchdb)') 420 except ImportError: 421 LOGGER.debug('no Couchdb driver couchdb') 422 423 try: 424 import pymongo 425 DRIVERS.append('MongoDB(pymongo)') 426 except: 427 LOGGER.debug('no MongoDB driver pymongo') 428 429 try: 430 import imaplib 431 DRIVERS.append('IMAP(imaplib)') 432 except: 433 LOGGER.debug('no IMAP driver imaplib') 434 435 PLURALIZE_RULES = [ 436 (re.compile('child$'), re.compile('child$'), 'children'), 437 (re.compile('oot$'), re.compile('oot$'), 'eet'), 438 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 439 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 440 (re.compile('sis$'), re.compile('sis$'), 'ses'), 441 (re.compile('man$'), re.compile('man$'), 'men'), 442 (re.compile('ife$'), re.compile('ife$'), 'ives'), 443 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 444 (re.compile('lf$'), re.compile('lf$'), 'lves'), 445 (re.compile('[sxz]$'), re.compile('$'), 'es'), 446 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 447 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 448 (re.compile('$'), re.compile('$'), 's'), 449 ]
def pluralize(singular, rules=None):
    """Return the plural form of *singular*.

    Each rule is a ``(search_regex, sub_regex, replacement)`` triple; the
    first rule whose search regex matches (and whose substitution yields a
    non-empty string) wins.  *rules* defaults to the module-level
    PLURALIZE_RULES.

    The default was changed from ``rules=PLURALIZE_RULES`` to a ``None``
    sentinel so the module constant is looked up at call time instead of
    being captured (mutable) at definition time.
    """
    if rules is None:
        rules = PLURALIZE_RULES
    for re_search, re_sub, replace in rules:
        # mirror the original short-circuit: an empty substitution result
        # is treated as "no match" and the scan continues
        plural = re_search.search(singular) and re_sub.sub(replace, singular)
        if plural:
            return plural
456
def hide_password(uri):
    """Mask the password portion of a DAL URI with ``******``.

    Accepts a single URI string or a list/tuple of them (masked element-wise).
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    return [hide_password(item) for item in uri]
461
def OR(a, b):
    """Combine two query expressions with ``|`` (logical OR in DAL queries)."""
    combined = a | b
    return combined
464
def AND(a, b):
    """Combine two query expressions with ``&`` (logical AND in DAL queries)."""
    combined = a & b
    return combined
467
def IDENTITY(x):
    """Return *x* unchanged (default no-op credential decoder)."""
    return x
469
def varquote_aux(name, quotestr='%s'):
    """Wrap *name* in *quotestr* unless it is a plain ``\\w+`` identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
472
def quote_keyword(a, keyword='timestamp'):
    """Replace the literal substring ``.keyword`` (when immediately followed
    by a word character) with ``."<keyword>"``.

    NOTE(review): the pattern matches the literal text ``.keyword``, not the
    *keyword* argument — looks suspicious, but the behavior is preserved
    exactly; confirm intent against the adapters that call this.
    """
    pattern = re.compile(r'\.keyword(?=\w)')
    return pattern.sub('."%s"' % keyword, a)
477 478 if 'google' in DRIVERS: 479 480 is_jdbc = False
481 482 - class GAEDecimalProperty(gae.Property):
483 """ 484 GAE decimal implementation 485 """ 486 data_type = decimal.Decimal 487
488 - def __init__(self, precision, scale, **kwargs):
489 super(GAEDecimalProperty, self).__init__(self, **kwargs) 490 d = '1.' 491 for x in range(scale): 492 d += '0' 493 self.round = decimal.Decimal(d)
494
495 - def get_value_for_datastore(self, model_instance):
496 value = super(GAEDecimalProperty, self)\ 497 .get_value_for_datastore(model_instance) 498 if value is None or value == '': 499 return None 500 else: 501 return str(value)
502
503 - def make_value_from_datastore(self, value):
504 if value is None or value == '': 505 return None 506 else: 507 return decimal.Decimal(value).quantize(self.round)
508
509 - def validate(self, value):
510 value = super(GAEDecimalProperty, self).validate(value) 511 if value is None or isinstance(value, decimal.Decimal): 512 return value 513 elif isinstance(value, basestring): 514 return decimal.Decimal(value) 515 raise gae.BadValueError("Property %s must be a Decimal or string."\ 516 % self.name)
517
518 ################################################################################### 519 # class that handles connection pooling (all adapters are derived from this one) 520 ################################################################################### 521 522 -class ConnectionPool(object):
523 524 POOLS = {} 525 check_active_connection = True 526 527 @staticmethod
528 - def set_folder(folder):
530 531 # ## this allows gluon to commit/rollback all dbs in this thread 532
533 - def close(self,action='commit',really=True):
534 if action: 535 if callable(action): 536 action(self) 537 else: 538 getattr(self, action)() 539 # ## if you want pools, recycle this connection 540 if self.pool_size: 541 GLOBAL_LOCKER.acquire() 542 pool = ConnectionPool.POOLS[self.uri] 543 if len(pool) < self.pool_size: 544 pool.append(self.connection) 545 really = False 546 GLOBAL_LOCKER.release() 547 if really: 548 self.close_connection() 549 self.connection = None
550 551 @staticmethod
552 - def close_all_instances(action):
553 """ to close cleanly databases in a multithreaded environment """ 554 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 555 for db_uid, db_group in dbs: 556 for db in db_group: 557 if hasattr(db,'_adapter'): 558 db._adapter.close(action) 559 getattr(THREAD_LOCAL,'db_instances',{}).clear() 560 getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() 561 if callable(action): 562 action(None) 563 return
564
565 - def find_or_make_work_folder(self):
566 """ this actually does not make the folder. it has to be there """ 567 self.folder = getattr(THREAD_LOCAL,'folder','') 568 569 if (os.path.isabs(self.folder) and 570 isinstance(self, UseDatabaseStoredFile) and 571 self.folder.startswith(os.getcwd())): 572 self.folder = os.path.relpath(self.folder, os.getcwd()) 573 574 # Creating the folder if it does not exist 575 if False and self.folder and not exists(self.folder): 576 os.mkdir(self.folder)
577
578 - def after_connection_hook(self):
579 """hook for the after_connection parameter""" 580 if callable(self._after_connection): 581 self._after_connection(self) 582 self.after_connection()
583
584 - def after_connection(self):
585 """ this it is supposed to be overloaded by adapters""" 586 pass
587
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        # already connected: nothing to do
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: open a fresh connection every time
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                # the lock protects the shared per-URI pool list; note each
                # branch below releases it before doing connection work
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection; probe it with SELECT 1 and
                    # loop to try the next one if it is no longer alive
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: make a brand-new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
631
632 633 ################################################################################### 634 # this is a generic adapter that does nothing; all others are derived from this one 635 ################################################################################### 636 637 -class BaseAdapter(ConnectionPool):
638 native_json = False 639 driver = None 640 driver_name = None 641 drivers = () # list of drivers from which to pick 642 connection = None 643 commit_on_alter_table = False 644 support_distributed_transaction = False 645 uploads_in_blob = False 646 can_select_for_update = True 647 dbpath = None 648 folder = None 649 650 TRUE = 'T' 651 FALSE = 'F' 652 T_SEP = ' ' 653 QUOTE_TEMPLATE = '"%s"' 654 655 types = { 656 'boolean': 'CHAR(1)', 657 'string': 'CHAR(%(length)s)', 658 'text': 'TEXT', 659 'json': 'TEXT', 660 'password': 'CHAR(%(length)s)', 661 'blob': 'BLOB', 662 'upload': 'CHAR(%(length)s)', 663 'integer': 'INTEGER', 664 'bigint': 'INTEGER', 665 'float':'DOUBLE', 666 'double': 'DOUBLE', 667 'decimal': 'DOUBLE', 668 'date': 'DATE', 669 'time': 'TIME', 670 'datetime': 'TIMESTAMP', 671 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 672 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 673 'list:integer': 'TEXT', 674 'list:string': 'TEXT', 675 'list:reference': 'TEXT', 676 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 677 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 678 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 679 } 680
681 - def isOperationalError(self,exception):
682 if not hasattr(self.driver, "OperationalError"): 683 return None 684 return isinstance(exception, self.driver.OperationalError)
685
686 - def isProgrammingError(self,exception):
687 if not hasattr(self.driver, "ProgrammingError"): 688 return None 689 return isinstance(exception, self.driver.ProgrammingError)
690
691 - def id_query(self, table):
692 pkeys = getattr(table,'_primarykey',None) 693 if pkeys: 694 return table[pkeys[0]] != None 695 else: 696 return table._id != None
697
698 - def adapt(self, obj):
699 return "'%s'" % obj.replace("'", "''")
700
701 - def smart_adapt(self, obj):
702 if isinstance(obj,(int,float)): 703 return str(obj) 704 return self.adapt(str(obj))
705
706 - def file_exists(self, filename):
707 """ 708 to be used ONLY for files that on GAE may not be on filesystem 709 """ 710 return exists(filename)
711
712 - def file_open(self, filename, mode='rb', lock=True):
713 """ 714 to be used ONLY for files that on GAE may not be on filesystem 715 """ 716 if have_portalocker and lock: 717 fileobj = portalocker.LockedFile(filename,mode) 718 else: 719 fileobj = open(filename,mode) 720 return fileobj
721
722 - def file_close(self, fileobj):
723 """ 724 to be used ONLY for files that on GAE may not be on filesystem 725 """ 726 if fileobj: 727 fileobj.close()
728
729 - def file_delete(self, filename):
730 os.unlink(filename)
731
732 - def find_driver(self,adapter_args,uri=None):
733 self.adapter_args = adapter_args 734 if getattr(self,'driver',None) != None: 735 return 736 drivers_available = [driver for driver in self.drivers 737 if driver in globals()] 738 if uri: 739 items = uri.split('://',1)[0].split(':') 740 request_driver = items[1] if len(items)>1 else None 741 else: 742 request_driver = None 743 request_driver = request_driver or adapter_args.get('driver') 744 if request_driver: 745 if request_driver in drivers_available: 746 self.driver_name = request_driver 747 self.driver = globals().get(request_driver) 748 else: 749 raise RuntimeError("driver %s not available" % request_driver) 750 elif drivers_available: 751 self.driver_name = drivers_available[0] 752 self.driver = globals().get(self.driver_name) 753 else: 754 raise RuntimeError("no driver available %s" % str(self.drivers))
755
756 - def log(self, message, table=None):
757 """ Logs migrations 758 759 It will not log changes if logfile is not specified. Defaults 760 to sql.log 761 """ 762 763 isabs = None 764 logfilename = self.adapter_args.get('logfile','sql.log') 765 writelog = bool(logfilename) 766 if writelog: 767 isabs = os.path.isabs(logfilename) 768 769 if table and table._dbt and writelog and self.folder: 770 if isabs: 771 table._loggername = logfilename 772 else: 773 table._loggername = pjoin(self.folder, logfilename) 774 logfile = self.file_open(table._loggername, 'a') 775 logfile.write(message) 776 self.file_close(logfile)
777 778
779 - def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', 780 credential_decoder=IDENTITY, driver_args={}, 781 adapter_args={},do_connect=True, after_connection=None):
782 self.db = db 783 self.dbengine = "None" 784 self.uri = uri 785 self.pool_size = pool_size 786 self.folder = folder 787 self.db_codec = db_codec 788 self._after_connection = after_connection 789 class Dummy(object): 790 lastrowid = 1 791 def __getattr__(self, value): 792 return lambda *a, **b: []
793 self.connection = Dummy() 794 self.cursor = Dummy() 795
796 - def sequence_name(self,tablename):
797 return '%s_sequence' % tablename
798
799 - def trigger_name(self,tablename):
800 return '%s_sequence' % tablename
801
802 - def varquote(self,name):
803 return name
804
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and optionally execute) the CREATE TABLE DDL for *table*.

        Returns the CREATE TABLE SQL string.  When *migrate* is truthy the
        per-table migration metadata file (``table._dbt``) is consulted: if
        absent the table is created (unless *fake_migrate*), otherwise the
        stored field definitions are compared and migrate_table() is invoked
        on any difference.  *polymodel* is unused here — presumably a hook
        for the GAE adapter's override; confirm against subclasses.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}          # field metadata used to detect migrations
        sql_fields_aux = {}      # same, but with defaults baked in (CREATE only)
        TFK = {}                 # table-level foreign keys: rtablename -> {rfield: field}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                    constraint_name = constraint_name, # should be quoted
                                    foreign_key = '%s (%s)' % (rtablename,
                                                               rfieldname),
                                    table_name = tablename,
                                    field_name = field_name,
                                    on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    # AddGeometryColumn runs AFTER CREATE TABLE (see below)
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level (multicolumn) foreign keys collected above
        # NOTE(review): `field.ondelete` here reuses the loop variable left
        # over from the field loop -- looks fragile; confirm intent
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                table_name = tablename,
                field_name=', '.join(fkeys),
                foreign_table = rtablename,
                foreign_key = ', '.join(pkeys),
                on_delete_action = field.ondelete)

        if getattr(table,'_primarykey',None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # migration metadata lives next to a file-based sqlite db,
        # otherwise in the working folder
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            # no metadata yet: actually create the table (unless faking)
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # metadata exists: compare and migrate on any difference
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, None,
                                   fake_migrate=fake_migrate)
        return query
1031
def migrate_table(
    self,
    table,
    sql_fields,
    sql_fields_old,
    sql_fields_aux,
    logfile,
    fake_migrate=False,
    ):
    """Diff the table's current field metadata against the metadata
    stored in its .table file and emit/execute the ALTER TABLE
    statements needed to reconcile them.

    table: the Table being migrated
    sql_fields: metadata for the new (desired) definition
    sql_fields_old: metadata previously pickled in the .table file
    sql_fields_aux: like sql_fields but with the SQL used for CREATE
        (defaults differ so a changed default does not trigger DDL)
    logfile: unused, kept for backward compatibility
    fake_migrate: when True, log and update metadata without executing
    """
    # logfile is deprecated (moved to adapter.log method)
    db = table._db
    db._migrated.append(table._tablename)
    tablename = table._tablename
    def fix(item):
        # normalize legacy metadata entries (plain SQL strings) into dicts
        k,v=item
        if not isinstance(v,dict):
            v=dict(type='unknown',sql=v)
        return k.lower(),v
    # make sure all field names are lower case to avoid
    # migrations because of case change
    sql_fields = dict(map(fix,sql_fields.iteritems()))
    sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
    sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
    if db._debug:
        logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

    # union of new and old field names, preserving new-field order
    keys = sql_fields.keys()
    for key in sql_fields_old:
        if not key in keys:
            keys.append(key)
    new_add = self.concat_add(tablename)

    metadata_change = False
    sql_fields_current = copy.copy(sql_fields_old)
    for key in keys:
        query = None
        if not key in sql_fields_old:
            # field added
            sql_fields_current[key] = sql_fields[key]
            if self.dbengine in ('postgres',) and \
               sql_fields[key]['type'].startswith('geometry'):
                # 'sql' == ftype in sql; PostGIS columns are added via
                # AddGeometryColumn, whose full statement is stored in 'sql'
                query = [ sql_fields[key]['sql'] ]
            else:
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
            metadata_change = True
        elif self.dbengine in ('sqlite', 'spatialite'):
            # sqlite cannot alter column types; only update the metadata
            if key in sql_fields:
                sql_fields_current[key] = sql_fields[key]
            metadata_change = True
        elif not key in sql_fields:
            # field removed
            del sql_fields_current[key]
            ftype = sql_fields_old[key]['type']
            if (self.dbengine in ('postgres',) and
                ftype.startswith('geometry')):
                geotype, parms = ftype[:-1].split('(')
                schema = parms.split(',')[0]
                query = [ "SELECT DropGeometryColumn ('%(schema)s', "+
                          "'%(table)s', '%(field)s');" %
                          dict(schema=schema, table=tablename, field=key,) ]
            elif self.dbengine in ('firebird',):
                query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
            else:
                query = ['ALTER TABLE %s DROP COLUMN %s;' %
                         (tablename, key)]
            metadata_change = True
        elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
              and not (key in table.fields and
                       isinstance(table[key].type, SQLCustomType)) \
              and not sql_fields[key]['type'].startswith('reference')\
              and not sql_fields[key]['type'].startswith('double')\
              and not sql_fields[key]['type'].startswith('id'):
            # field SQL changed: rebuild the column through a __tmp copy
            # so the existing data is preserved across the type change
            sql_fields_current[key] = sql_fields[key]
            t = tablename
            tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
            if self.dbengine in ('firebird',):
                drop_expr = 'ALTER TABLE %s DROP %s;'
            else:
                drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
            key_tmp = key + '__tmp'
            query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt),
                     'UPDATE %s SET %s=%s;' % (t, key_tmp, key),
                     drop_expr % (t, key),
                     'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                     'UPDATE %s SET %s=%s;' % (t, key, key_tmp),
                     drop_expr % (t, key_tmp)]
            metadata_change = True
        elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
            # only the logical type changed (no DDL needed)
            sql_fields_current[key] = sql_fields[key]
            metadata_change = True

        if query:
            self.log('timestamp: %s\n'
                % datetime.datetime.today().isoformat(), table)
            db['_lastsql'] = '\n'.join(query)
            for sub_query in query:
                self.log(sub_query + '\n', table)
                if fake_migrate:
                    if db._adapter.commit_on_alter_table:
                        self.save_dbt(table,sql_fields_current)
                    self.log('faked!\n', table)
                else:
                    self.execute(sub_query)
                    # Caveat: mysql, oracle and firebird
                    # do not allow multiple alter table
                    # in one transaction so we must commit
                    # partial transactions and
                    # update table._dbt after alter table.
                    if db._adapter.commit_on_alter_table:
                        db.commit()
                        self.save_dbt(table,sql_fields_current)
                        self.log('success!\n', table)

        elif metadata_change:
            self.save_dbt(table,sql_fields_current)

    if metadata_change and not (query and db._adapter.commit_on_alter_table):
        db.commit()
        self.save_dbt(table,sql_fields_current)
        self.log('success!\n', table)
def save_dbt(self, table, sql_fields_current):
    """Pickle the table's current field metadata into its .table file."""
    handle = self.file_open(table._dbt, 'w')
    pickle.dump(sql_fields_current, handle)
    self.file_close(handle)
1159
def LOWER(self, first):
    """Wrap the expanded expression in SQL LOWER()."""
    inner = self.expand(first)
    return 'LOWER(%s)' % inner
1162
def UPPER(self, first):
    """Wrap the expanded expression in SQL UPPER()."""
    inner = self.expand(first)
    return 'UPPER(%s)' % inner
1165
def COUNT(self, first, distinct=None):
    """SQL COUNT(), optionally COUNT(DISTINCT ...)."""
    template = 'COUNT(DISTINCT %s)' if distinct else 'COUNT(%s)'
    return template % self.expand(first)
1169
def EXTRACT(self, first, what):
    """SQL EXTRACT(what FROM expression)."""
    inner = self.expand(first)
    return "EXTRACT(%s FROM %s)" % (what, inner)
1172
def EPOCH(self, first):
    """Extract the epoch (seconds) component of a datetime expression."""
    return self.EXTRACT(first, 'epoch')
1175
def LENGTH(self, first):
    """SQL LENGTH() of the expanded expression."""
    inner = self.expand(first)
    return "LENGTH(%s)" % inner
1178
def AGGREGATE(self, first, what):
    """Generic aggregate call: what(expression)."""
    inner = self.expand(first)
    return "%s(%s)" % (what, inner)
1181
def JOIN(self):
    """Keyword used for inner joins."""
    return 'JOIN'
1184
def LEFT_JOIN(self):
    """Keyword used for left outer joins."""
    return 'LEFT JOIN'
1187
def RANDOM(self):
    """SQL expression producing a random value (backend default)."""
    return 'Random()'
1190
def NOT_NULL(self, default, field_type):
    """NOT NULL clause carrying the represented default value."""
    rendered = self.represent(default, field_type)
    return 'NOT NULL DEFAULT %s' % rendered
1193
def COALESCE(self, first, second):
    """SQL COALESCE over first followed by each expression in second."""
    parts = [self.expand(first)]
    for item in second:
        parts.append(self.expand(item))
    return 'COALESCE(%s)' % ','.join(parts)
1197
def COALESCE_ZERO(self, first):
    """COALESCE the expression with 0."""
    inner = self.expand(first)
    return 'COALESCE(%s,0)' % inner
1200
def RAW(self, first):
    """Pass the value through untouched (raw SQL fragment)."""
    return first
1203
def ALLOW_NULL(self):
    """Clause allowing NULL values; empty for most backends."""
    return ''
1206
def SUBSTRING(self, field, parameters):
    """SUBSTR(field, start, length) from a (start, length) pair."""
    start, length = parameters[0], parameters[1]
    return 'SUBSTR(%s,%s,%s)' % (self.expand(field), start, length)
1209
def PRIMARY_KEY(self, key):
    """PRIMARY KEY clause for the given (possibly composite) key text."""
    return 'PRIMARY KEY(%s)' % key
1212
1213 - def _drop(self, table, mode):
1214 return ['DROP TABLE %s;' % table]
1215
def drop(self, table, mode=''):
    """Drop the table, purge it from the DAL and delete its .table file."""
    db = table._db
    for statement in self._drop(table, mode):
        if table._dbt:
            self.log(statement + '\n', table)
        self.execute(statement)
    db.commit()
    del db[table._tablename]
    del db.tables[db.tables.index(table._tablename)]
    db._remove_references_to(table)
    if table._dbt:
        self.file_delete(table._dbt)
        self.log('success!\n', table)
1230
1231 - def _insert(self, table, fields):
1232 if fields: 1233 keys = ','.join(f.name for f, v in fields) 1234 values = ','.join(self.expand(v, f.type) for f, v in fields) 1235 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1236 else: 1237 return self._insert_empty(table)
1238
1239 - def _insert_empty(self, table):
1240 return 'INSERT INTO %s DEFAULT VALUES;' % table
1241
def insert(self, table, fields):
    """Insert a row; return a Reference id, a keyed-table dict, or the
    backend's raw id value, delegating errors to _on_insert_error."""
    sql = self._insert(table, fields)
    try:
        self.execute(sql)
    except Exception:
        e = sys.exc_info()[1]
        if hasattr(table, '_on_insert_error'):
            return table._on_insert_error(table, fields, e)
        raise e
    if hasattr(table, '_primarykey'):
        return dict([(f.name, v) for f, v in fields
                     if f.name in table._primarykey])
    new_id = self.lastrowid(table)
    if not isinstance(new_id, int):
        return new_id
    rid = Reference(new_id)
    (rid._table, rid._record) = (table, None)
    return rid
1260
def bulk_insert(self, table, items):
    """Insert each item in turn, returning the per-row results."""
    results = []
    for item in items:
        results.append(self.insert(table, item))
    return results
1263
def NOT(self, first):
    """Logical negation of the expanded expression."""
    return '(NOT %s)' % self.expand(first)
1266
def AND(self, first, second):
    """Logical conjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s AND %s)' % (lhs, rhs)
1269
def OR(self, first, second):
    """Logical disjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s OR %s)' % (lhs, rhs)
1272
def BELONGS(self, first, second):
    """SQL IN; `second` is a nested-select string or a value sequence."""
    if isinstance(second, str):
        # raw nested select: strip its trailing ';'
        return '(%s IN (%s))' % (self.expand(first), second[:-1])
    if not second:
        return '(1=0)'  # membership in the empty set is always false
    rendered = ','.join(self.expand(item, first.type) for item in second)
    return '(%s IN (%s))' % (self.expand(first), rendered)
1280
def REGEXP(self, first, second):
    """Regular-expression operator; unsupported in the base adapter."""
    raise NotImplementedError
1284
def LIKE(self, first, second):
    """Case-sensitive LIKE; unsupported in the base adapter."""
    raise NotImplementedError
1288
def ILIKE(self, first, second):
    """Case-insensitive LIKE (plain LIKE in the base adapter)."""
    lhs = self.expand(first)
    rhs = self.expand(second, 'string')
    return '(%s LIKE %s)' % (lhs, rhs)
1293
def STARTSWITH(self, first, second):
    """Prefix match via LIKE 'second%'."""
    pattern = self.expand(second + '%', 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1297
def ENDSWITH(self, first, second):
    """Suffix match via LIKE '%second'."""
    pattern = self.expand('%' + second, 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1301
def CONTAINS(self, first, second, case_sensitive=False):
    """LIKE-based containment for string/text/json and list: fields.

    '%' and '|' in the needle are escaped ('%%', '||'); list: fields
    wrap the needle in '|' delimiters to match whole list items.
    """
    if first.type in ('string', 'text', 'json'):
        if isinstance(second, Expression):
            second = Expression(None, self.CONCAT('%', Expression(
                None, self.REPLACE(second, ('%', '%%'))), '%'))
        else:
            second = '%' + str(second).replace('%', '%%') + '%'
    elif first.type.startswith('list:'):
        if isinstance(second, Expression):
            second = Expression(None, self.CONCAT(
                '%|', Expression(None, self.REPLACE(
                    Expression(None, self.REPLACE(
                        second, ('%', '%%'))), ('|', '||'))), '|%'))
        else:
            second = '%|' + str(second).replace('%', '%%')\
                     .replace('|', '||') + '|%'
    # `self.LIKE` is always truthy, so the ternary is equivalent to the
    # classic `case_sensitive and self.LIKE or self.ILIKE` idiom
    operator = self.LIKE if case_sensitive else self.ILIKE
    return operator(first, second)
1320
def EQ(self, first, second=None):
    """Equality; comparing against None renders IS NULL."""
    if second is None:
        return '(%s IS NULL)' % self.expand(first)
    return '(%s = %s)' % (self.expand(first),
                          self.expand(second, first.type))
1326
def NE(self, first, second=None):
    """Inequality; comparing against None renders IS NOT NULL."""
    if second is None:
        return '(%s IS NOT NULL)' % self.expand(first)
    return '(%s <> %s)' % (self.expand(first),
                           self.expand(second, first.type))
1332
def LT(self, first, second=None):
    """Strict less-than; None is not an ordered value."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return '(%s < %s)' % (self.expand(first),
                          self.expand(second, first.type))
1338
def LE(self, first, second=None):
    """Less-or-equal; None is not an ordered value."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return '(%s <= %s)' % (self.expand(first),
                           self.expand(second, first.type))
1344
def GT(self, first, second=None):
    """Strict greater-than; None is not an ordered value."""
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    return '(%s > %s)' % (self.expand(first),
                          self.expand(second, first.type))
1350
def GE(self, first, second=None):
    """Greater-or-equal; None is not an ordered value."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return '(%s >= %s)' % (self.expand(first),
                           self.expand(second, first.type))
1356
def is_numerical_type(self, ftype):
    """True for integer/boolean/double/bigint and decimal(...) types."""
    if ftype in ('integer', 'boolean', 'double', 'bigint'):
        return True
    return ftype.startswith('decimal')
1360
def REPLACE(self, first, args):
    """SQL REPLACE(haystack, needle, replacement).

    `args` is the (needle, replacement) pair.  The original signature
    used Python 2 tuple parameter unpacking — `def REPLACE(self, first,
    (second, third))` — which was removed from the language by PEP 3113;
    unpacking inside the body keeps every call site unchanged (callers
    always passed the pair positionally) while remaining valid syntax
    on both Python 2 and 3.
    """
    (second, third) = args
    return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'),
                                  self.expand(second, 'string'),
                                  self.expand(third, 'string'))
1365
def CONCAT(self, *items):
    """String concatenation with the standard SQL '||' operator."""
    rendered = [self.expand(x, 'string') for x in items]
    return '(%s)' % ' || '.join(rendered)
1368
def ADD(self, first, second):
    """'+' for numeric fields, string concatenation otherwise."""
    if not self.is_numerical_type(first.type):
        return self.CONCAT(first, second)
    return '(%s + %s)' % (self.expand(first),
                          self.expand(second, first.type))
1375
def SUB(self, first, second):
    """SQL subtraction, second coerced to first's type."""
    return '(%s - %s)' % (self.expand(first),
                          self.expand(second, first.type))
1379
def MUL(self, first, second):
    """SQL multiplication, second coerced to first's type."""
    return '(%s * %s)' % (self.expand(first),
                          self.expand(second, first.type))
1383
def DIV(self, first, second):
    """SQL division, second coerced to first's type."""
    return '(%s / %s)' % (self.expand(first),
                          self.expand(second, first.type))
1387
def MOD(self, first, second):
    """SQL modulo ('%'), second coerced to first's type."""
    return '(%s %% %s)' % (self.expand(first),
                           self.expand(second, first.type))
1391
def AS(self, first, second):
    """Column/table aliasing: expression AS name."""
    return '%s AS %s' % (self.expand(first), second)
1394
def ON(self, first, second):
    """JOIN ... ON clause, applying common filters to the joined table."""
    if use_common_filters(second):
        second = self.common_filter(second, [first._tablename])
    return '%s ON %s' % (self.expand(first), self.expand(second))
1399
def INVERT(self, first):
    """Descending sort direction for ORDER BY."""
    return '%s DESC' % self.expand(first)
1402
def COMMA(self, first, second):
    """Comma-join two expanded expressions (field lists)."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '%s, %s' % (lhs, rhs)
1405
def expand(self, expression, field_type=None):
    """Recursively render a Field/Expression/Query/literal into SQL text."""
    if isinstance(expression, Field):
        rendered = '%s.%s' % (expression.table._tablename, expression.name)
        # cast non-text columns when a string rendering is requested
        if field_type == 'string' and not expression.type in (
                'string', 'text', 'json', 'password'):
            rendered = 'CAST(%s AS %s)' % (rendered, self.types['text'])
        return rendered
    elif isinstance(expression, (Expression, Query)):
        lhs = expression.first
        rhs = expression.second
        op = expression.op
        optional_args = expression.optional_args or {}
        if not rhs is None:
            rendered = op(lhs, rhs, **optional_args)
        elif not lhs is None:
            rendered = op(lhs, **optional_args)
        elif isinstance(op, str):
            # raw SQL fragment: parenthesize, dropping a trailing ';'
            if op.endswith(';'):
                op = op[:-1]
            rendered = '(%s)' % op
        else:
            rendered = op()
        return rendered
    elif field_type:
        return str(self.represent(expression, field_type))
    elif isinstance(expression, (list, tuple)):
        return ','.join(self.represent(item, field_type)
                        for item in expression)
    elif isinstance(expression, bool):
        return '1' if expression else '0'
    else:
        return str(expression)
1438
def table_alias(self, name):
    """SQL name for a table given either a Table object or its name."""
    if isinstance(name, Table):
        return str(name)
    return str(self.db[name])
1441
def alias(self, table, alias):
    """
    Given a table object, makes a new table object
    with alias name, registers it in the db and returns it.
    """
    aliased = copy.copy(table)
    aliased['_ot'] = aliased._ot or aliased._tablename
    aliased['ALL'] = SQLALL(aliased)
    aliased['_tablename'] = alias
    for fieldname in aliased.fields:
        # re-point each copied field at the aliased table
        field_copy = copy.copy(aliased[fieldname])
        field_copy._tablename = alias
        field_copy.tablename = alias
        field_copy.table = aliased
        aliased[fieldname] = field_copy
    table._db[alias] = aliased
    return aliased
1458
1459 - def _truncate(self, table, mode=''):
1460 tablename = table._tablename 1461 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1462
def truncate(self, table, mode=' '):
    """Truncate the table, logging each statement; commit on success.

    (The original wrapped this in a try/finally whose finally clause was
    `pass` — a no-op, so exceptions propagate identically without it.)
    """
    statements = table._db._adapter._truncate(table, mode)
    for statement in statements:
        self.log(statement + '\n', table)
        self.execute(statement)
    table._db.commit()
    self.log('success!\n', table)
1474
1475 - def _update(self, tablename, query, fields):
1476 if query: 1477 if use_common_filters(query): 1478 query = self.common_filter(query, [tablename]) 1479 sql_w = ' WHERE ' + self.expand(query) 1480 else: 1481 sql_w = '' 1482 sql_v = ','.join(['%s=%s' % (field.name, 1483 self.expand(value, field.type)) \ 1484 for (field, value) in fields]) 1485 tablename = "%s" % self.db[tablename] 1486 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1487
def update(self, tablename, query, fields):
    """Execute an UPDATE; return the affected row count when available,
    delegating failures to the table's _on_update_error hook."""
    sql = self._update(tablename, query, fields)
    try:
        self.execute(sql)
    except Exception:
        e = sys.exc_info()[1]
        table = self.db[tablename]
        if hasattr(table, '_on_update_error'):
            return table._on_update_error(table, query, fields, e)
        raise e
    try:
        return self.cursor.rowcount
    except:
        return None
1502
1503 - def _delete(self, tablename, query):
1504 if query: 1505 if use_common_filters(query): 1506 query = self.common_filter(query, [tablename]) 1507 sql_w = ' WHERE ' + self.expand(query) 1508 else: 1509 sql_w = '' 1510 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1511
def delete(self, tablename, query):
    """Delete matching rows; emulates ON DELETE CASCADE for SQLite."""
    sql = self._delete(tablename, query)
    db = self.db
    table = db[tablename]
    is_sqlite = self.dbengine in ('sqlite', 'spatialite')
    ### special code to handle CASCADE in SQLite & SpatiaLite:
    # remember the ids about to disappear before executing the delete
    if is_sqlite and table._referenced_by:
        deleted = [x[table._id.name] for x in db(query).select(table._id)]
    self.execute(sql)
    try:
        counter = self.cursor.rowcount
    except:
        counter = None
    # cascade into referencing tables (sqlite does not do it for us)
    if is_sqlite and counter:
        for field in table._referenced_by:
            if field.type == 'reference ' + table._tablename \
                    and field.ondelete == 'CASCADE':
                db(field.belongs(deleted)).delete()
    return counter
1533
def get_table(self, query):
    """Return the single table a query references, or raise RuntimeError."""
    tablenames = self.tables(query)
    if len(tablenames) == 1:
        return tablenames[0]
    if len(tablenames) < 1:
        raise RuntimeError("No table selected")
    raise RuntimeError("Too many tables selected")
1542
def expand_all(self, fields, tablenames):
    """Normalize a select field list into Field/Expression objects;
    empty input expands to every field of every requested table."""
    db = self.db
    new_fields = []
    append = new_fields.append
    for item in fields:
        if isinstance(item, SQLALL):
            # table.ALL: splice in all of that table's fields
            new_fields += item._table
        elif isinstance(item, str):
            if REGEX_TABLE_DOT_FIELD.match(item):
                tablename, fieldname = item.split('.')
                append(db[tablename][fieldname])
            else:
                # arbitrary string: wrap as a literal expression
                append(Expression(db, lambda item=item: item))
        else:
            append(item)
    # ## if no fields specified take them all from the requested tables
    if not new_fields:
        for table in tablenames:
            for field in db[table]:
                append(field)
    return new_fields
1564
def _select(self, query, fields, attributes):
    """Build the full SELECT statement for a query.

    query: Query/Expression restricting the rows (may be None)
    fields: list of Field/Expression/'table.field' strings to select
    attributes: select options (left, join, distinct, groupby, orderby,
        having, limitby, orderby_on_limitby, for_update, ...); anything
        outside SELECT_ARGS raises SyntaxError.
    Returns the SQL string (does not execute it).
    """
    tables = self.tables
    for key in set(attributes.keys())-SELECT_ARGS:
        raise SyntaxError('invalid select attribute: %s' % key)
    args_get = attributes.get
    tablenames = tables(query)
    tablenames_for_common_filters = tablenames
    # pull in tables referenced only by the selected fields
    for field in fields:
        if isinstance(field, basestring) \
                and REGEX_TABLE_DOT_FIELD.match(field):
            tn,fn = field.split('.')
            field = self.db[tn][fn]
        for tablename in tables(field):
            if not tablename in tablenames:
                tablenames.append(tablename)

    if len(tablenames) < 1:
        raise SyntaxError('Set: no tables selected')
    self._colnames = map(self.expand, fields)
    def geoexpand(field):
        # geometry columns are selected as WKT via ST_AsText
        if isinstance(field.type,str) and field.type.startswith('geometry'):
            field = field.st_astext()
        return self.expand(field)
    sql_f = ', '.join(map(geoexpand, fields))
    sql_o = ''
    sql_s = ''
    left = args_get('left', False)
    inner_join = args_get('join', False)
    distinct = args_get('distinct', False)
    groupby = args_get('groupby', False)
    orderby = args_get('orderby', False)
    having = args_get('having', False)
    limitby = args_get('limitby', False)
    orderby_on_limitby = args_get('orderby_on_limitby', True)
    for_update = args_get('for_update', False)
    if self.can_select_for_update is False and for_update is True:
        raise SyntaxError('invalid select attribute: for_update')
    if distinct is True:
        sql_s += 'DISTINCT'
    elif distinct:
        sql_s += 'DISTINCT ON (%s)' % distinct
    if inner_join:
        icommand = self.JOIN()
        if not isinstance(inner_join, (tuple, list)):
            inner_join = [inner_join]
        # join targets given as tables vs. as explicit ON expressions
        ijoint = [t._tablename for t in inner_join
                  if not isinstance(t,Expression)]
        ijoinon = [t for t in inner_join if isinstance(t, Expression)]
        itables_to_merge={} #issue 490
        [itables_to_merge.update(
                dict.fromkeys(tables(t))) for t in ijoinon]
        ijoinont = [t.first._tablename for t in ijoinon]
        [itables_to_merge.pop(t) for t in ijoinont
         if t in itables_to_merge] #issue 490
        iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
        iexcluded = [t for t in tablenames
                     if not t in iimportant_tablenames]
    if left:
        join = attributes['left']
        command = self.LEFT_JOIN()
        if not isinstance(join, (tuple, list)):
            join = [join]
        joint = [t._tablename for t in join
                 if not isinstance(t, Expression)]
        joinon = [t for t in join if isinstance(t, Expression)]
        #patch join+left patch (solves problem with ordering in left joins)
        tables_to_merge={}
        [tables_to_merge.update(
                dict.fromkeys(tables(t))) for t in joinon]
        joinont = [t.first._tablename for t in joinon]
        [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
        # common filters must not constrain left-joined tables
        tablenames_for_common_filters = [t for t in tablenames
                                         if not t in joinont ]
        important_tablenames = joint + joinont + tables_to_merge.keys()
        excluded = [t for t in tablenames
                    if not t in important_tablenames ]
    else:
        excluded = tablenames

    if use_common_filters(query):
        query = self.common_filter(query,tablenames_for_common_filters)
    sql_w = ' WHERE ' + self.expand(query) if query else ''

    # assemble the FROM clause for the four join combinations
    if inner_join and not left:
        sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                               itables_to_merge.keys()])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
    elif not inner_join and left:
        sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                               tables_to_merge.keys()])
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    elif inner_join and left:
        all_tables_in_query = set(important_tablenames + \
                                  iimportant_tablenames + \
                                  tablenames)
        tables_in_joinon = set(joinont + ijoinont)
        tables_not_in_joinon = \
            all_tables_in_query.difference(tables_in_joinon)
        sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    else:
        sql_t = ', '.join(self.table_alias(t) for t in tablenames)
    if groupby:
        if isinstance(groupby, (list, tuple)):
            groupby = xorify(groupby)
        sql_o += ' GROUP BY %s' % self.expand(groupby)
        if having:
            sql_o += ' HAVING %s' % attributes['having']
    if orderby:
        if isinstance(orderby, (list, tuple)):
            orderby = xorify(orderby)
        if str(orderby) == '<random>':
            sql_o += ' ORDER BY %s' % self.RANDOM()
        else:
            sql_o += ' ORDER BY %s' % self.expand(orderby)
    # a LIMIT without ORDER BY is non-deterministic: default to ordering
    # by each table's primary key (or id) unless told otherwise
    if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
        sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
    # oracle does not support limitby
    sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
    if for_update and self.can_select_for_update is True:
        sql = sql.rstrip(';') + ' FOR UPDATE;'
    return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Assemble the final SELECT, appending LIMIT/OFFSET when given."""
    if limitby:
        lmin, lmax = limitby
        sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
1705
1706 - def _fetchall(self):
1707 return self.cursor.fetchall()
1708
1709 - def _select_aux(self,sql,fields,attributes):
1710 args_get = attributes.get 1711 cache = args_get('cache',None) 1712 if not cache: 1713 self.execute(sql) 1714 rows = self._fetchall() 1715 else: 1716 (cache_model, time_expire) = cache 1717 key = self.uri + '/' + sql + '/rows' 1718 if len(key)>200: key = hashlib_md5(key).hexdigest() 1719 def _select_aux2(): 1720 self.execute(sql) 1721 return self._fetchall()
1722 rows = cache_model(key,_select_aux2,time_expire) 1723 if isinstance(rows,tuple): 1724 rows = list(rows) 1725 limitby = args_get('limitby', None) or (0,) 1726 rows = self.rowslice(rows,limitby[0],None) 1727 processor = args_get('processor',self.parse) 1728 cacheable = args_get('cacheable',False) 1729 return processor(rows,fields,self._colnames,cacheable=cacheable) 1730
def select(self, query, fields, attributes):
    """
    Always returns a Rows object, possibly empty.

    When both 'cache' and 'cacheable' are set, the whole processed
    result is cached; otherwise caching (if any) happens per-rows in
    _select_aux.
    """
    sql = self._select(query, fields, attributes)
    cache = attributes.get('cache', None)
    if not (cache and attributes.get('cacheable', False)):
        return self._select_aux(sql, fields, attributes)
    del attributes['cache']
    (cache_model, time_expire) = cache
    key = self.uri + '/' + sql
    if len(key) > 200:
        key = hashlib_md5(key).hexdigest()
    args = (sql, fields, attributes)
    return cache_model(
        key,
        lambda self=self, args=args: self._select_aux(*args),
        time_expire)
1749
1750 - def _count(self, query, distinct=None):
1751 tablenames = self.tables(query) 1752 if query: 1753 if use_common_filters(query): 1754 query = self.common_filter(query, tablenames) 1755 sql_w = ' WHERE ' + self.expand(query) 1756 else: 1757 sql_w = '' 1758 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1759 if distinct: 1760 if isinstance(distinct,(list, tuple)): 1761 distinct = xorify(distinct) 1762 sql_d = self.expand(distinct) 1763 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1764 (sql_d, sql_t, sql_w) 1765 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1766
def count(self, query, distinct=None):
    """Execute the count query and return the scalar result."""
    sql = self._count(query, distinct)
    self.execute(sql)
    return self.cursor.fetchone()[0]
1770
def tables(self, *queries):
    """Collect the distinct table names referenced by the given queries."""
    found = set()
    for query in queries:
        if isinstance(query, Field):
            found.add(query.tablename)
        elif isinstance(query, (Expression, Query)):
            # recurse into both operands of the expression tree
            if not query.first is None:
                found = found.union(self.tables(query.first))
            if not query.second is None:
                found = found.union(self.tables(query.second))
    return list(found)
1782
def commit(self):
    """Commit the current transaction, if a connection is open."""
    if not self.connection:
        return None
    return self.connection.commit()
1786
def rollback(self):
    """Roll back the current transaction, if a connection is open."""
    if not self.connection:
        return None
    return self.connection.rollback()
1790
def close_connection(self):
    """Close and forget the connection; return the driver's close result."""
    if not self.connection:
        return None
    result = self.connection.close()
    self.connection = None
    return result
1796
def distributed_transaction_begin(self, key):
    """No-op in the base adapter; overridden by two-phase-commit backends."""
    return
1799
def prepare(self, key):
    """Prepare phase of a two-phase commit, if a connection is open."""
    if self.connection:
        self.connection.prepare()
1802
def commit_prepared(self, key):
    """Commit a prepared two-phase transaction, if a connection is open."""
    if self.connection:
        self.connection.commit()
1805
def rollback_prepared(self, key):
    """Roll back a prepared two-phase transaction, if a connection is open."""
    if self.connection:
        self.connection.rollback()
1808
def concat_add(self, tablename):
    """Separator used to chain column additions in one ALTER TABLE."""
    return ', ADD '
1811
def constraint_name(self, table, fieldname):
    """Deterministic name for a field's constraint."""
    return '%s_%s__constraint' % (table, fieldname)
1814
def create_sequence_and_triggers(self, query, table, **args):
    """Run the CREATE TABLE; backends needing sequences override this."""
    self.execute(query)
1817
def log_execute(self, *a, **b):
    """Run cursor.execute with debug logging and timing bookkeeping."""
    if not self.connection:
        return None
    command = a[0]
    if hasattr(self, 'filter_sql_command'):
        command = self.filter_sql_command(command)
    if self.db._debug:
        LOGGER.debug('SQL: %s' % command)
    self.db._lastsql = command
    started = time.time()
    ret = self.cursor.execute(command, *a[1:], **b)
    # keep only the most recent TIMINGSSIZE (command, duration) entries
    self.db._timings.append((command, time.time() - started))
    del self.db._timings[:-TIMINGSSIZE]
    return ret
1831
def execute(self, *a, **b):
    """Delegate to log_execute (logging + timing wrapper)."""
    return self.log_execute(*a, **b)
1834
def represent(self, obj, fieldtype):
    """Convert a Python value into its SQL literal for `fieldtype`.

    Handles callables, SQLCustomType encoders, Expression/Field
    passthrough, list: bar-encoding, NULL, booleans, numbers, dates,
    blobs (base64) and json, finally adapting text through the driver's
    quoting (self.adapt).  Python 2 only: uses `unicode`, `long` and
    py2 `bytes`/`str` semantics.
    """
    field_is_type = fieldtype.startswith
    if isinstance(obj, CALLABLETYPES):
        # lazy defaults: call to obtain the actual value
        obj = obj()
    if isinstance(fieldtype, SQLCustomType):
        value = fieldtype.encoder(obj)
        if fieldtype.type in ('string','text', 'json'):
            return self.adapt(value)
        return value
    if isinstance(obj, (Expression, Field)):
        # already SQL: render as-is
        return str(obj)
    if field_is_type('list:'):
        if not obj:
            obj = []
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        if field_is_type('list:string'):
            obj = map(str,obj)
        else:
            obj = map(int,[o for o in obj if o != ''])
    # we don't want to bar_encode json objects
    if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
        obj = bar_encode(obj)
    if obj is None:
        return 'NULL'
    # empty string means NULL except for text-like field types
    # ('st'ring, 'te'xt, 'js'on, 'pa'ssword, 'up'load)
    if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
        return 'NULL'
    r = self.represent_exceptions(obj, fieldtype)
    if not r is None:
        # adapter-specific override took care of it
        return r
    if fieldtype == 'boolean':
        # values beginning with '0' or 'F'/'f' count as false
        if obj and not str(obj)[:1].upper() in '0F':
            return self.smart_adapt(self.TRUE)
        else:
            return self.smart_adapt(self.FALSE)
    if fieldtype == 'id' or fieldtype == 'integer':
        return str(long(obj))
    if field_is_type('decimal'):
        return str(obj)
    elif field_is_type('reference'): # reference
        if fieldtype.find('.')>0:
            return repr(obj)
        elif isinstance(obj, (Row, Reference)):
            return str(obj['id'])
        return str(long(obj))
    elif fieldtype == 'double':
        return repr(float(obj))
    if isinstance(obj, unicode):
        obj = obj.encode(self.db_codec)
    if fieldtype == 'blob':
        obj = base64.b64encode(str(obj))
    elif fieldtype == 'date':
        if isinstance(obj, (datetime.date, datetime.datetime)):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'datetime':
        if isinstance(obj, datetime.datetime):
            obj = obj.isoformat(self.T_SEP)[:19]
        elif isinstance(obj, datetime.date):
            obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
        else:
            obj = str(obj)
    elif fieldtype == 'time':
        if isinstance(obj, datetime.time):
            # NOTE(review): time.isoformat() is 8 chars ('HH:MM:SS') or
            # longer with microseconds; slicing to 10 keeps two fraction
            # digits ('HH:MM:SS.f') — confirm [:8] was not intended
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'json':
        if not self.native_json:
            if have_serializers:
                obj = serializers.json(obj)
            elif simplejson:
                obj = simplejson.dumps(obj)
            else:
                raise RuntimeError("missing simplejson")
    if not isinstance(obj,bytes):
        obj = bytes(obj)
    try:
        # ensure the bytes are valid in the target codec
        obj.decode(self.db_codec)
    except:
        obj = obj.decode('latin1').encode(self.db_codec)
    return self.adapt(obj)
1918
def represent_exceptions(self, obj, fieldtype):
    """Adapter hook to special-case value representation; base: nothing."""
    return None
1921
def lastrowid(self, table):
    """Id of the last inserted row; the base adapter cannot provide one."""
    return None
1924
def rowslice(self, rows, minimum=0, maximum=None):
    """
    By default this function does nothing;
    overload when db does not do slicing.
    """
    return rows
1931
def parse_value(self, value, field_type, blob_decode=True):
    """Convert a raw driver value into the DAL type for field_type.

    Python 2 only: normalizes str via the db codec and unicode to utf-8
    before dispatching through self.parsemap.
    """
    if field_type != 'blob' and isinstance(value, str):
        try:
            value = value.decode(self.db._db_codec)
        except Exception:
            pass
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    if isinstance(field_type, SQLCustomType):
        value = field_type.decoder(value)
    # anything without a parseable string type passes through untouched
    if not isinstance(field_type, str) or value is None:
        return value
    if field_type in ('string', 'text', 'password', 'upload', 'dict'):
        return value
    if field_type.startswith('geo'):
        return value
    if field_type == 'blob' and not blob_decode:
        return value
    key = REGEX_TYPE.match(field_type).group(0)
    return self.parsemap[key](value, field_type)
1953
1954 - def parse_reference(self, value, field_type):
1955 referee = field_type[10:].strip() 1956 if not '.' in referee: 1957 value = Reference(value) 1958 value._table, value._record = self.db[referee], None 1959 return value
1960
1961 - def parse_boolean(self, value, field_type):
1962 return value == self.TRUE or str(value)[:1].lower() == 't'
1963
1964 - def parse_date(self, value, field_type):
1965 if isinstance(value, datetime.datetime): 1966 return value.date() 1967 if not isinstance(value, (datetime.date,datetime.datetime)): 1968 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1969 value = datetime.date(y, m, d) 1970 return value
1971
1972 - def parse_time(self, value, field_type):
1973 if not isinstance(value, datetime.time): 1974 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1975 if len(time_items) == 3: 1976 (h, mi, s) = time_items 1977 else: 1978 (h, mi, s) = time_items + [0] 1979 value = datetime.time(h, mi, s) 1980 return value
1981
1982 - def parse_datetime(self, value, field_type):
1983 if not isinstance(value, datetime.datetime): 1984 value = str(value) 1985 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 1986 if '+' in timezone: 1987 ms,tz = timezone.split('+') 1988 h,m = tz.split(':') 1989 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1990 elif '-' in timezone: 1991 ms,tz = timezone.split('-') 1992 h,m = tz.split(':') 1993 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1994 else: 1995 dt = None 1996 (y, m, d) = map(int,date_part.split('-')) 1997 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 1998 while len(time_parts)<3: time_parts.append(0) 1999 time_items = map(int,time_parts) 2000 (h, mi, s) = time_items 2001 value = datetime.datetime(y, m, d, h, mi, s) 2002 if dt: 2003 value = value + dt 2004 return value
2005
    def parse_blob(self, value, field_type):
        # blobs are stored base64-encoded (see represent's 'blob' branch);
        # decode them back to raw bytes on the way out
        return base64.b64decode(str(value))
2008
2009 - def parse_decimal(self, value, field_type):
2010 decimals = int(field_type[8:-1].split(',')[-1]) 2011 if self.dbengine in ('sqlite', 'spatialite'): 2012 value = ('%.' + str(decimals) + 'f') % value 2013 if not isinstance(value, decimal.Decimal): 2014 value = decimal.Decimal(str(value)) 2015 return value
2016
2017 - def parse_list_integers(self, value, field_type):
2018 if not isinstance(self, NoSQLAdapter): 2019 value = bar_decode_integer(value) 2020 return value
2021
2022 - def parse_list_references(self, value, field_type):
2023 if not isinstance(self, NoSQLAdapter): 2024 value = bar_decode_integer(value) 2025 return [self.parse_reference(r, field_type[5:]) for r in value]
2026
2027 - def parse_list_strings(self, value, field_type):
2028 if not isinstance(self, NoSQLAdapter): 2029 value = bar_decode_string(value) 2030 return value
2031
    def parse_id(self, value, field_type):
        # ids are normalized to (Python 2) long so comparisons behave
        # consistently regardless of what the driver returned
        return long(value)
2034
    def parse_integer(self, value, field_type):
        # integers are normalized to (Python 2) long, same as ids
        return long(value)
2037
    def parse_double(self, value, field_type):
        # coerce to float; drivers may return Decimal or str
        return float(value)
2040
    def parse_json(self, value, field_type):
        # When the backend has no native JSON type the column holds a
        # serialized string: decode it here with whichever json library
        # is available.  With native_json the driver already returns a
        # parsed value, so it passes through untouched.
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value
2054
    def build_parsemap(self):
        # Map each base field-type keyword (as extracted by REGEX_TYPE in
        # parse_value) to its parser method.  Types absent from this map
        # (string, text, password, upload, dict, geo*) need no parsing
        # and are short-circuited in parse_value.
        self.parsemap = {
            'id':self.parse_id,
            'integer':self.parse_integer,
            'bigint':self.parse_integer,
            'float':self.parse_double,
            'double':self.parse_double,
            'reference':self.parse_reference,
            'boolean':self.parse_boolean,
            'date':self.parse_date,
            'time':self.parse_time,
            'datetime':self.parse_datetime,
            'blob':self.parse_blob,
            'decimal':self.parse_decimal,
            'json':self.parse_json,
            'list:integer':self.parse_list_integers,
            'list:reference':self.parse_list_references,
            'list:string':self.parse_list_strings,
            }
2074
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        """
        Convert raw driver rows into a Rows object of nested Row records
        (one sub-Row per table), parsing each column value per its field
        type and attaching update_record/delete_record helpers and
        virtual/lazy fields.  Columns that are not 'table.field' names
        (expressions, aggregates) are collected under row['_extra'].
        """
        db = self.db
        virtualtables = []
        new_rows = []
        # precompute per-column metadata once, outside the row loop
        tmps = []
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)  # not a plain table.field column
            else:
                (tablename, _the_sep_, fieldname) = colname.partition('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename,fieldname,table,field,ft))
        for (i,row) in enumerate(rows):
            new_row = Row()
            for (j,colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename,fieldname,table,field,ft) = tmp
                    # one sub-Row per table, created on first sight
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value,ft,blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft=='id' and fieldname!='id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        # attach record-level helpers bound to this id
                        colset.update_record = RecordUpdater(colset,table,id)
                        colset.delete_record = RecordDeleter(table,id)
                        if table._db._lazy_tables:
                            colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                        # expose back-references as lazy Sets
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename,field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield,id)
                else:
                    # expression/aggregate column: keep under _extra and,
                    # when aliased with AS, also as a row attribute
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type,blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        # materialize new-style virtual and lazy fields per table
        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.iteritems()
                              if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.iteritems()
                           if isinstance(v,FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        try:
                            box[f] = v.f(row)
                        except AttributeError:
                            pass # not enough fields to define virtual field
                    for f,v in fields_lazy:
                        try:
                            box[f] = (v.handler or VirtualCommand)(v.f,row)
                        except AttributeError:
                            pass # not enough fields to define virtual field

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2176
2177 - def common_filter(self, query, tablenames):
2178 tenant_fieldname = self.db._request_tenant 2179 2180 for tablename in tablenames: 2181 table = self.db[tablename] 2182 2183 # deal with user provided filters 2184 if table._common_filter != None: 2185 query = query & table._common_filter(query) 2186 2187 # deal with multi_tenant filters 2188 if tenant_fieldname in table: 2189 default = table[tenant_fieldname].default 2190 if not default is None: 2191 newquery = table[tenant_fieldname] == default 2192 if query is None: 2193 query = newquery 2194 else: 2195 query = query & newquery 2196 return query
2197
2198 - def CASE(self,query,t,f):
2199 def represent(x): 2200 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2201 if x is None: return 'NULL' 2202 elif isinstance(x,Expression): return str(x) 2203 else: return self.represent(x,types.get(type(x),'string'))
2204 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2205 (self.expand(query),represent(t),represent(f))) 2206
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases via the sqlite2/sqlite3 drivers."""
    drivers = ('sqlite2','sqlite3')

    # SQLite has no SELECT ... FOR UPDATE; emulated in select() below
    # with BEGIN IMMEDIATE TRANSACTION (locks the whole database).
    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # delegate date-part extraction to the UDF registered in
        # after_connection()
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        # UDF: slice a date/time part out of an ISO 'YYYY-MM-DD HH:MM:SS'
        # string by fixed offsets; 'epoch' converts to a UNIX timestamp.
        # Returns None for malformed input or an unknown lookup.
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        # UDF backing the SQL REGEXP operator (re.search semantics)
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """
        Parse an sqlite:// URI into a database file path (':memory:' for
        in-memory databases) and prepare the connector closure; connects
        immediately when do_connect is True.
        """
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # NOTE(review): the pool_size argument is deliberately ignored
        # here (forced to 0) — sqlite connections are not pooled.
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # relative path: anchor it under the working folder
                if PYTHON_VERSION == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            # allow the connection to be used from pooled threads
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT() and REGEXP()
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # SQLite has no TRUNCATE: delete all rows and reset the
        # autoincrement counter kept in sqlite_sequence
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        # the sqlite driver exposes the last insert id on the cursor
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
2302
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter with the SpatiaLite GIS extension loaded."""
    drivers = ('sqlite3','sqlite2')

    # inherit the base column types, adding a geometry type
    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        """
        Like SQLiteAdapter.__init__ but also stores the srid (spatial
        reference id; default 4326 = WGS84) used by the GIS functions.
        """
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # pool_size argument intentionally not used (forced to 0)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # NOTE(review): unlike SQLiteAdapter.__init__ there is no
                # PYTHON_VERSION==2 branch here; the decode/encode below
                # presumably assumes Python 2 str folders — confirm.
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # load the platform-specific SpatiaLite shared library, then
        # register the standard sqlite UDFs
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions (SpatiaLite spellings of the OGC operations)

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
                                       second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry values are injected as WKT via ST_GeomFromText with
        # the srid parsed from the field type; everything else falls
        # through to BaseAdapter.represent
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            # if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            # elif field_is_type('geography'):
            #     value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2410
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite over JDBC (for Jython via the zxJDBC bridge)."""
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse an sqlite:// URI and prepare a JDBC connector closure."""
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # relative path: anchor it under the working folder
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath,driver_args=driver_args):
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        # only web2py_extract can be registered here; REGEXP is not
        # available through this driver
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        # no special cursor handling on zxJDBC: just log and run
        return self.log_execute(a)
2450
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL via MySQLdb, pymysql or mysql-connector."""
    drivers = ('MySQLdb','pymysql', 'mysqlconnector')

    # MySQL implicitly commits around ALTER TABLE
    commit_on_alter_table = True
    support_distributed_transaction = True
    # web2py field type -> MySQL column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        # backtick-quote identifiers that need quoting
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    # XA two-phase-commit hooks
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,ley):
        # NOTE(review): parameter 'ley' looks like a typo for 'key'; kept
        # unchanged since callers may use it positionally.
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    # mysql://user:password@host:port/db?set_encoding=charset
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a mysql:// URI and prepare the connector closure."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)


        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # enforce FK checks and disable backslash escapes so web2py's
        # quote-doubling string escaping matches the server's parsing
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2578
class PostgreSQLAdapter(BaseAdapter):
    """
    Adapter for PostgreSQL via psycopg2 or pg8000, including PostGIS
    helpers and native-JSON support detection.
    """
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    # web2py field type -> PostgreSQL column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',

        }

    QUOTE_TEMPLATE = '%s'

    def varquote(self,name):
        # double-quote identifiers that need quoting
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        # quote a Python value as a SQL literal, using the driver's own
        # quoting (psycopg2) where available
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' means string concatenation (||) for textual types
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    # two-phase-commit hooks (PREPARE TRANSACTION protocol)
    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #     % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    # postgres://user:password@host:port/db?sslmode=...
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """
        Parse a postgres:// URI into a libpq-style connection string and
        prepare the connector closure; srid (default 4326 = WGS84) is
        used by the GIS types.
        """
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                % (db, user, host, port, password)
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # force a known client encoding and standard SQL string literals,
        # then probe for native JSON support
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        # read the current value of the table's id sequence
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        # NOTE(review): the pg8000/zxJDBC branches compare version
        # *strings* lexicographically ("9.2.0"), while psycopg2 compares
        # the integer server_version — confirm against each driver's API.
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json:
            self.types["json"] = "JSON"
            self.native_json = True
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text columns must be cast to CHAR before LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # case-insensitive LIKE; non-text columns are cast (and matched
        # with plain LIKE, as in LIKE above)
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        # postgres regex-match operator
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry/geography values are injected as WKT/EWKT literals
        # with the srid parsed from the field type; everything else falls
        # through to BaseAdapter.represent
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2854
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """
    PostgreSQL adapter that stores list: types as native arrays
    (BIGINT[]/TEXT[]) instead of bar-encoded TEXT.
    """
    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # native arrays come back from the driver as Python lists already
        return value

    def parse_list_references(self, value, field_type):
        # wrap each id of the native array into a Reference
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        # native arrays come back from the driver as Python lists already
        return value

    def represent(self, obj, fieldtype):
        # list: types are rendered as ARRAY[...] literals; everything
        # else is delegated to BaseAdapter.represent
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,obj)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return BaseAdapter.represent(self, obj, fieldtype)
2907
2908 2909 -class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
2910 drivers = ('zxJDBC',) 2911 2912 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') 2913
2914 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2915 credential_decoder=IDENTITY, driver_args={}, 2916 adapter_args={}, do_connect=True, after_connection=None ):
2917 self.db = db 2918 self.dbengine = "postgres" 2919 self.uri = uri 2920 if do_connect: self.find_driver(adapter_args,uri) 2921 self.pool_size = pool_size 2922 self.folder = folder 2923 self.db_codec = db_codec 2924 self._after_connection = after_connection 2925 self.find_or_make_work_folder() 2926 ruri = uri.split('://',1)[1] 2927 m = self.REGEX_URI.match(ruri) 2928 if not m: 2929 raise SyntaxError("Invalid URI string in DAL") 2930 user = credential_decoder(m.group('user')) 2931 if not user: 2932 raise SyntaxError('User required') 2933 password = credential_decoder(m.group('password')) 2934 if not password: 2935 password = '' 2936 host = m.group('host') 2937 if not host: 2938 raise SyntaxError('Host name required') 2939 db = m.group('db') 2940 if not db: 2941 raise SyntaxError('Database name required') 2942 port = m.group('port') or '5432' 2943 msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) 2944 def connector(msg=msg,driver_args=driver_args): 2945 return self.driver.connect(*msg,**driver_args)
2946 self.connector = connector 2947 if do_connect: self.reconnect()
2948
2949 - def after_connection(self):
2950 self.connection.set_client_encoding('UTF8') 2951 self.execute('BEGIN;') 2952 self.execute("SET CLIENT_ENCODING TO 'UNICODE';") 2953 self.try_json()
2954
class OracleAdapter(BaseAdapter):
    # Adapter for Oracle via the cx_Oracle driver.
    drivers = ('cx_Oracle',)

    # do not force a commit around ALTER TABLE for this engine
    commit_on_alter_table = False
    # web2py field type -> Oracle column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }
2987 - def sequence_name(self,tablename):
2988 return '%s_sequence' % tablename
2989
2990 - def trigger_name(self,tablename):
2991 return '%s_trigger' % tablename
2992
2993 - def LEFT_JOIN(self):
2994 return 'LEFT OUTER JOIN'
2995
2996 - def RANDOM(self):
2997 return 'dbms_random.value'
2998
2999 - def NOT_NULL(self,default,field_type):
3000 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3001
3002 - def _drop(self,table,mode):
3003 sequence_name = table._sequence_name 3004 return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
3005
3006 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3007 if limitby: 3008 (lmin, lmax) = limitby 3009 if len(sql_w) > 1: 3010 sql_w_row = sql_w + ' AND w_row > %i' % lmin 3011 else: 3012 sql_w_row = 'WHERE w_row > %i' % lmin 3013 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 3014 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3015
3016 - def constraint_name(self, tablename, fieldname):
3017 constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) 3018 if len(constraint_name)>30: 3019 constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) 3020 return constraint_name
3021
3022 - def represent_exceptions(self, obj, fieldtype):
3023 if fieldtype == 'blob': 3024 obj = base64.b64encode(str(obj)) 3025 return ":CLOB('%s')" % obj 3026 elif fieldtype == 'date': 3027 if isinstance(obj, (datetime.date, datetime.datetime)): 3028 obj = obj.isoformat()[:10] 3029 else: 3030 obj = str(obj) 3031 return "to_date('%s','yyyy-mm-dd')" % obj 3032 elif fieldtype == 'datetime': 3033 if isinstance(obj, datetime.datetime): 3034 obj = obj.isoformat()[:19].replace('T',' ') 3035 elif isinstance(obj, datetime.date): 3036 obj = obj.isoformat()[:10]+' 00:00:00' 3037 else: 3038 obj = str(obj) 3039 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 3040 return None
3041
3042 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3043 credential_decoder=IDENTITY, driver_args={}, 3044 adapter_args={}, do_connect=True, after_connection=None):
3045 self.db = db 3046 self.dbengine = "oracle" 3047 self.uri = uri 3048 if do_connect: self.find_driver(adapter_args,uri) 3049 self.pool_size = pool_size 3050 self.folder = folder 3051 self.db_codec = db_codec 3052 self._after_connection = after_connection 3053 self.find_or_make_work_folder() 3054 ruri = uri.split('://',1)[1] 3055 if not 'threaded' in driver_args: 3056 driver_args['threaded']=True 3057 def connector(uri=ruri,driver_args=driver_args): 3058 return self.driver.connect(uri,**driver_args)
3059 self.connector = connector 3060 if do_connect: self.reconnect()
3061
3062 - def after_connection(self):
3063 self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 3064 self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
3065 3066 oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") 3067
3068 - def execute(self, command, args=None):
3069 args = args or [] 3070 i = 1 3071 while True: 3072 m = self.oracle_fix.match(command) 3073 if not m: 3074 break 3075 command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] 3076 args.append(m.group('clob')[6:-2].replace("''", "'")) 3077 i += 1 3078 if command[-1:]==';': 3079 command = command[:-1] 3080 return self.log_execute(command, args)
3081
3082 - def create_sequence_and_triggers(self, query, table, **args):
3083 tablename = table._tablename 3084 id_name = table._id.name 3085 sequence_name = table._sequence_name 3086 trigger_name = table._trigger_name 3087 self.execute(query) 3088 self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name) 3089 self.execute(""" 3090 CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW 3091 DECLARE 3092 curr_val NUMBER; 3093 diff_val NUMBER; 3094 PRAGMA autonomous_transaction; 3095 BEGIN 3096 IF :NEW.%(id)s IS NOT NULL THEN 3097 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3098 diff_val := :NEW.%(id)s - curr_val - 1; 3099 IF diff_val != 0 THEN 3100 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val; 3101 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3102 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1'; 3103 END IF; 3104 END IF; 3105 SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL; 3106 END; 3107 """ % dict(trigger_name=trigger_name, tablename=tablename, 3108 sequence_name=sequence_name,id=id_name))
3109
3110 - def lastrowid(self,table):
3111 sequence_name = table._sequence_name 3112 self.execute('SELECT %s.currval FROM dual;' % sequence_name) 3113 return long(self.cursor.fetchone()[0])
3114 3115 #def parse_value(self, value, field_type, blob_decode=True): 3116 # if blob_decode and isinstance(value, cx_Oracle.LOB): 3117 # try: 3118 # value = value.read() 3119 # except self.driver.ProgrammingError: 3120 # # After a subsequent fetch the LOB value is not valid anymore 3121 # pass 3122 # return BaseAdapter.parse_value(self, value, field_type, blob_decode) 3123
3124 - def _fetchall(self):
3125 if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description): 3126 return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ 3127 for c in r]) for r in self.cursor] 3128 else: 3129 return self.cursor.fetchall()
3130
class MSSQLAdapter(BaseAdapter):
    # Adapter for Microsoft SQL Server via pyodbc.
    drivers = ('pyodbc',)
    # separator between the date and time parts in datetime literals
    T_SEP = 'T'

    # MSSQL quotes identifiers with square brackets
    QUOTE_TEMPLATE = "[%s]"

    # web2py field type -> T-SQL column DDL fragment
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }
3166 - def concat_add(self,tablename):
3167 return '; ALTER TABLE %s ADD ' % tablename
3168
3169 - def varquote(self,name):
3170 return varquote_aux(name,'[%s]')
3171
3172 - def EXTRACT(self,field,what):
3173 return "DATEPART(%s,%s)" % (what, self.expand(field))
3174
3175 - def LEFT_JOIN(self):
3176 return 'LEFT OUTER JOIN'
3177
3178 - def RANDOM(self):
3179 return 'NEWID()'
3180
3181 - def ALLOW_NULL(self):
3182 return ' NULL'
3183
3184 - def SUBSTRING(self,field,parameters):
3185 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
3186
3187 - def PRIMARY_KEY(self,key):
3188 return 'PRIMARY KEY CLUSTERED (%s)' % key
3189
3190 - def AGGREGATE(self, first, what):
3191 if what == 'LENGTH': 3192 what = 'LEN' 3193 return "%s(%s)" % (what, self.expand(first))
3194 3195
3196 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3197 if limitby: 3198 (lmin, lmax) = limitby 3199 sql_s += ' TOP %i' % lmax 3200 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3201 3202 TRUE = 1 3203 FALSE = 0 3204 3205 REGEX_DSN = re.compile('^(?P<dsn>.+)$') 3206 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') 3207 REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 3208
3209 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3210 credential_decoder=IDENTITY, driver_args={}, 3211 adapter_args={}, do_connect=True, srid=4326, 3212 after_connection=None):
3213 self.db = db 3214 self.dbengine = "mssql" 3215 self.uri = uri 3216 if do_connect: self.find_driver(adapter_args,uri) 3217 self.pool_size = pool_size 3218 self.folder = folder 3219 self.db_codec = db_codec 3220 self._after_connection = after_connection 3221 self.srid = srid 3222 self.find_or_make_work_folder() 3223 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3224 ruri = uri.split('://',1)[1] 3225 if '@' not in ruri: 3226 try: 3227 m = self.REGEX_DSN.match(ruri) 3228 if not m: 3229 raise SyntaxError( 3230 'Parsing uri string(%s) has no result' % self.uri) 3231 dsn = m.group('dsn') 3232 if not dsn: 3233 raise SyntaxError('DSN required') 3234 except SyntaxError: 3235 e = sys.exc_info()[1] 3236 LOGGER.error('NdGpatch error') 3237 raise e 3238 # was cnxn = 'DSN=%s' % dsn 3239 cnxn = dsn 3240 else: 3241 m = self.REGEX_URI.match(ruri) 3242 if not m: 3243 raise SyntaxError( 3244 "Invalid URI string in DAL: %s" % self.uri) 3245 user = credential_decoder(m.group('user')) 3246 if not user: 3247 raise SyntaxError('User required') 3248 password = credential_decoder(m.group('password')) 3249 if not password: 3250 password = '' 3251 host = m.group('host') 3252 if not host: 3253 raise SyntaxError('Host name required') 3254 db = m.group('db') 3255 if not db: 3256 raise SyntaxError('Database name required') 3257 port = m.group('port') or '1433' 3258 # Parse the optional url name-value arg pairs after the '?' 3259 # (in the form of arg1=value1&arg2=value2&...) 
3260 # Default values (drivers like FreeTDS insist on uppercase parameter keys) 3261 argsdict = { 'DRIVER':'{SQL Server}' } 3262 urlargs = m.group('urlargs') or '' 3263 for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs): 3264 argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') 3265 urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()]) 3266 cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ 3267 % (host, port, db, user, password, urlargs) 3268 def connector(cnxn=cnxn,driver_args=driver_args): 3269 return self.driver.connect(cnxn,**driver_args)
3270 self.connector = connector 3271 if do_connect: self.reconnect()
3272
3273 - def lastrowid(self,table):
3274 #self.execute('SELECT @@IDENTITY;') 3275 self.execute('SELECT SCOPE_IDENTITY();') 3276 return long(self.cursor.fetchone()[0])
3277
3278 - def rowslice(self,rows,minimum=0,maximum=None):
3279 if maximum is None: 3280 return rows[minimum:] 3281 return rows[minimum:maximum]
3282
3283 - def EPOCH(self, first):
3284 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3285
3286 - def CONCAT(self, *items):
3287 return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
3288 3289 # GIS Spatial Extensions 3290 3291 # No STAsGeoJSON in MSSQL 3292
3293 - def ST_ASTEXT(self, first):
3294 return '%s.STAsText()' %(self.expand(first))
3295
3296 - def ST_CONTAINS(self, first, second):
3297 return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
3298
3299 - def ST_DISTANCE(self, first, second):
3300 return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
3301
3302 - def ST_EQUALS(self, first, second):
3303 return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
3304
3305 - def ST_INTERSECTS(self, first, second):
3306 return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
3307
3308 - def ST_OVERLAPS(self, first, second):
3309 return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
3310 3311 # no STSimplify in MSSQL 3312
3313 - def ST_TOUCHES(self, first, second):
3314 return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
3315
3316 - def ST_WITHIN(self, first, second):
3317 return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
3318
3319 - def represent(self, obj, fieldtype):
3320 field_is_type = fieldtype.startswith 3321 if field_is_type('geometry'): 3322 srid = 0 # MS SQL default srid for geometry 3323 geotype, parms = fieldtype[:-1].split('(') 3324 if parms: 3325 srid = parms 3326 return "geometry::STGeomFromText('%s',%s)" %(obj, srid) 3327 elif fieldtype == 'geography': 3328 srid = 4326 # MS SQL default srid for geography 3329 geotype, parms = fieldtype[:-1].split('(') 3330 if parms: 3331 srid = parms 3332 return "geography::STGeomFromText('%s',%s)" %(obj, srid) 3333 # else: 3334 # raise SyntaxError('Invalid field type %s' %fieldtype) 3335 return "geometry::STGeomFromText('%s',%s)" %(obj, srid) 3336 return BaseAdapter.represent(self, obj, fieldtype)
3337
3338 3339 -class MSSQL3Adapter(MSSQLAdapter):
3340 """ experimental support for pagination in MSSQL"""
3341 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3342 if limitby: 3343 (lmin, lmax) = limitby 3344 if lmin == 0: 3345 sql_s += ' TOP %i' % lmax 3346 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) 3347 lmin += 1 3348 sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:] 3349 sql_g_inner = sql_o[:sql_o.find('ORDER BY ')] 3350 sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))] 3351 sql_f_inner = [f for f in sql_f.split(',')] 3352 sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] 3353 sql_f_iproxy = ', '.join(sql_f_iproxy) 3354 sql_f_oproxy = ', '.join(sql_f_outer) 3355 return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) 3356 return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
3357 - def rowslice(self,rows,minimum=0,maximum=None):
3358 return rows
3359
3360 3361 -class MSSQL2Adapter(MSSQLAdapter):
3362 drivers = ('pyodbc',) 3363 3364 types = { 3365 'boolean': 'CHAR(1)', 3366 'string': 'NVARCHAR(%(length)s)', 3367 'text': 'NTEXT', 3368 'json': 'NTEXT', 3369 'password': 'NVARCHAR(%(length)s)', 3370 'blob': 'IMAGE', 3371 'upload': 'NVARCHAR(%(length)s)', 3372 'integer': 'INT', 3373 'bigint': 'BIGINT', 3374 'float': 'FLOAT', 3375 'double': 'FLOAT', 3376 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3377 'date': 'DATETIME', 3378 'time': 'CHAR(8)', 3379 'datetime': 'DATETIME', 3380 'id': 'INT IDENTITY PRIMARY KEY', 3381 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3382 'list:integer': 'NTEXT', 3383 'list:string': 'NTEXT', 3384 'list:reference': 'NTEXT', 3385 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3386 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3387 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3388 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3389 } 3390
3391 - def represent(self, obj, fieldtype):
3392 value = BaseAdapter.represent(self, obj, fieldtype) 3393 if fieldtype in ('string','text', 'json') and value[:1]=="'": 3394 value = 'N'+value 3395 return value
3396
3397 - def execute(self,a):
3398 return self.log_execute(a.decode('utf8'))
3399
3400 -class VerticaAdapter(MSSQLAdapter):
3401 drivers = ('pyodbc',) 3402 T_SEP = ' ' 3403 3404 types = { 3405 'boolean': 'BOOLEAN', 3406 'string': 'VARCHAR(%(length)s)', 3407 'text': 'BYTEA', 3408 'json': 'VARCHAR(%(length)s)', 3409 'password': 'VARCHAR(%(length)s)', 3410 'blob': 'BYTEA', 3411 'upload': 'VARCHAR(%(length)s)', 3412 'integer': 'INT', 3413 'bigint': 'BIGINT', 3414 'float': 'FLOAT', 3415 'double': 'DOUBLE PRECISION', 3416 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3417 'date': 'DATE', 3418 'time': 'TIME', 3419 'datetime': 'DATETIME', 3420 'id': 'IDENTITY', 3421 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3422 'list:integer': 'BYTEA', 3423 'list:string': 'BYTEA', 3424 'list:reference': 'BYTEA', 3425 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3426 } 3427 3428
3429 - def EXTRACT(self, first, what):
3430 return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
3431
3432 - def _truncate(self, table, mode=''):
3433 tablename = table._tablename 3434 return ['TRUNCATE %s %s;' % (tablename, mode or '')]
3435
3436 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3437 if limitby: 3438 (lmin, lmax) = limitby 3439 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 3440 return 'SELECT %s %s FROM %s%s%s;' % \ 3441 (sql_s, sql_f, sql_t, sql_w, sql_o)
3442
3443 - def lastrowid(self,table):
3444 self.execute('SELECT LAST_INSERT_ID();') 3445 return long(self.cursor.fetchone()[0])
3446
3447 - def execute(self, a):
3448 return self.log_execute(a)
3449
3450 -class SybaseAdapter(MSSQLAdapter):
3451 drivers = ('Sybase',) 3452 3453 types = { 3454 'boolean': 'BIT', 3455 'string': 'CHAR VARYING(%(length)s)', 3456 'text': 'TEXT', 3457 'json': 'TEXT', 3458 'password': 'CHAR VARYING(%(length)s)', 3459 'blob': 'IMAGE', 3460 'upload': 'CHAR VARYING(%(length)s)', 3461 'integer': 'INT', 3462 'bigint': 'BIGINT', 3463 'float': 'FLOAT', 3464 'double': 'FLOAT', 3465 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3466 'date': 'DATETIME', 3467 'time': 'CHAR(8)', 3468 'datetime': 'DATETIME', 3469 'id': 'INT IDENTITY PRIMARY KEY', 3470 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3471 'list:integer': 'TEXT', 3472 'list:string': 'TEXT', 3473 'list:reference': 'TEXT', 3474 'geometry': 'geometry', 3475 'geography': 'geography', 3476 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3477 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3478 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3479 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3480 } 3481 3482
3483 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3484 credential_decoder=IDENTITY, driver_args={}, 3485 adapter_args={}, do_connect=True, srid=4326, 3486 after_connection=None):
3487 self.db = db 3488 self.dbengine = "sybase" 3489 self.uri = uri 3490 if do_connect: self.find_driver(adapter_args,uri) 3491 self.pool_size = pool_size 3492 self.folder = folder 3493 self.db_codec = db_codec 3494 self._after_connection = after_connection 3495 self.srid = srid 3496 self.find_or_make_work_folder() 3497 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3498 ruri = uri.split('://',1)[1] 3499 if '@' not in ruri: 3500 try: 3501 m = self.REGEX_DSN.match(ruri) 3502 if not m: 3503 raise SyntaxError( 3504 'Parsing uri string(%s) has no result' % self.uri) 3505 dsn = m.group('dsn') 3506 if not dsn: 3507 raise SyntaxError('DSN required') 3508 except SyntaxError: 3509 e = sys.exc_info()[1] 3510 LOGGER.error('NdGpatch error') 3511 raise e 3512 else: 3513 m = self.REGEX_URI.match(uri) 3514 if not m: 3515 raise SyntaxError( 3516 "Invalid URI string in DAL: %s" % self.uri) 3517 user = credential_decoder(m.group('user')) 3518 if not user: 3519 raise SyntaxError('User required') 3520 password = credential_decoder(m.group('password')) 3521 if not password: 3522 password = '' 3523 host = m.group('host') 3524 if not host: 3525 raise SyntaxError('Host name required') 3526 db = m.group('db') 3527 if not db: 3528 raise SyntaxError('Database name required') 3529 port = m.group('port') or '1433' 3530 3531 dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db) 3532 3533 driver_args.update(user = credential_decoder(user), 3534 password = credential_decoder(password)) 3535 3536 def connector(dsn=dsn,driver_args=driver_args): 3537 return self.driver.connect(dsn,**driver_args)
3538 self.connector = connector 3539 if do_connect: self.reconnect()
3540
class FireBirdAdapter(BaseAdapter):
    # Adapter for Firebird servers; several alternative drivers are accepted.
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # do not force a commit around ALTER TABLE for this engine
    commit_on_alter_table = False
    # Firebird supports two-phase (distributed) commit
    support_distributed_transaction = True
    # web2py field type -> Firebird column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
3572 - def sequence_name(self,tablename):
3573 return 'genid_%s' % tablename
3574
3575 - def trigger_name(self,tablename):
3576 return 'trg_id_%s' % tablename
3577
3578 - def RANDOM(self):
3579 return 'RAND()'
3580
3581 - def EPOCH(self, first):
3582 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3583
3584 - def NOT_NULL(self,default,field_type):
3585 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3586
3587 - def SUBSTRING(self,field,parameters):
3588 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
3589
3590 - def LENGTH(self, first):
3591 return "CHAR_LENGTH(%s)" % self.expand(first)
3592
3593 - def CONTAINS(self,first,second,case_sensitive=False):
3594 if first.type.startswith('list:'): 3595 second = Expression(None,self.CONCAT('|',Expression( 3596 None,self.REPLACE(second,('|','||'))),'|')) 3597 return '(%s CONTAINING %s)' % (self.expand(first), 3598 self.expand(second, 'string'))
3599
3600 - def _drop(self,table,mode):
3601 sequence_name = table._sequence_name 3602 return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
3603
3604 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3605 if limitby: 3606 (lmin, lmax) = limitby 3607 sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s) 3608 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3609
3610 - def _truncate(self,table,mode = ''):
3611 return ['DELETE FROM %s;' % table._tablename, 3612 'SET GENERATOR %s TO 0;' % table._sequence_name]
3613 3614 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$') 3615
3616 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3617 credential_decoder=IDENTITY, driver_args={}, 3618 adapter_args={}, do_connect=True, after_connection=None):
3619 self.db = db 3620 self.dbengine = "firebird" 3621 self.uri = uri 3622 if do_connect: self.find_driver(adapter_args,uri) 3623 self.pool_size = pool_size 3624 self.folder = folder 3625 self.db_codec = db_codec 3626 self._after_connection = after_connection 3627 self.find_or_make_work_folder() 3628 ruri = uri.split('://',1)[1] 3629 m = self.REGEX_URI.match(ruri) 3630 if not m: 3631 raise SyntaxError("Invalid URI string in DAL: %s" % self.uri) 3632 user = credential_decoder(m.group('user')) 3633 if not user: 3634 raise SyntaxError('User required') 3635 password = credential_decoder(m.group('password')) 3636 if not password: 3637 password = '' 3638 host = m.group('host') 3639 if not host: 3640 raise SyntaxError('Host name required') 3641 port = int(m.group('port') or 3050) 3642 db = m.group('db') 3643 if not db: 3644 raise SyntaxError('Database name required') 3645 charset = m.group('charset') or 'UTF8' 3646 driver_args.update(dsn='%s/%s:%s' % (host,port,db), 3647 user = credential_decoder(user), 3648 password = credential_decoder(password), 3649 charset = charset) 3650 3651 def connector(driver_args=driver_args): 3652 return self.driver.connect(**driver_args)
3653 self.connector = connector 3654 if do_connect: self.reconnect()
3655
3656 - def create_sequence_and_triggers(self, query, table, **args):
3657 tablename = table._tablename 3658 sequence_name = table._sequence_name 3659 trigger_name = table._trigger_name 3660 self.execute(query) 3661 self.execute('create generator %s;' % sequence_name) 3662 self.execute('set generator %s to 0;' % sequence_name) 3663 self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
3664
3665 - def lastrowid(self,table):
3666 sequence_name = table._sequence_name 3667 self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) 3668 return long(self.cursor.fetchone()[0])
3669
3670 3671 -class FireBirdEmbeddedAdapter(FireBirdAdapter):
3672 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3673 3674 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$') 3675
3676 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3677 credential_decoder=IDENTITY, driver_args={}, 3678 adapter_args={}, do_connect=True, after_connection=None):
3679 self.db = db 3680 self.dbengine = "firebird" 3681 self.uri = uri 3682 if do_connect: self.find_driver(adapter_args,uri) 3683 self.pool_size = pool_size 3684 self.folder = folder 3685 self.db_codec = db_codec 3686 self._after_connection = after_connection 3687 self.find_or_make_work_folder() 3688 ruri = uri.split('://',1)[1] 3689 m = self.REGEX_URI.match(ruri) 3690 if not m: 3691 raise SyntaxError( 3692 "Invalid URI string in DAL: %s" % self.uri) 3693 user = credential_decoder(m.group('user')) 3694 if not user: 3695 raise SyntaxError('User required') 3696 password = credential_decoder(m.group('password')) 3697 if not password: 3698 password = '' 3699 pathdb = m.group('path') 3700 if not pathdb: 3701 raise SyntaxError('Path required') 3702 charset = m.group('charset') 3703 if not charset: 3704 charset = 'UTF8' 3705 host = '' 3706 driver_args.update(host=host, 3707 database=pathdb, 3708 user=credential_decoder(user), 3709 password=credential_decoder(password), 3710 charset=charset) 3711 3712 def connector(driver_args=driver_args): 3713 return self.driver.connect(**driver_args)
3714 self.connector = connector 3715 if do_connect: self.reconnect()
3716
class InformixAdapter(BaseAdapter):
    """DAL adapter for Informix (9.x and later) using the informixdb driver.

    URI form: informix://user:password@host/database
    """

    drivers = ('informixdb',)

    # DAL field type -> Informix column DDL.
    # NOTE(review): several entries ('BLOB SUB_TYPE ...') mirror the Firebird
    # type map -- confirm against the target Informix version.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        # Informix requires DEFAULT to precede NOT NULL.
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Compose a SELECT, pushing the limitby window into SQL with
        SKIP/FIRST when the connected server version supports them."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Special-case literal rendering for date/datetime values.

        Returns a to_date(...) SQL literal, or None to fall back to the
        default representation for any other field type.
        """
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse an informix:// URI and prepare a connector callable.

        FIX: user and password were previously passed through
        credential_decoder twice (once when extracted from the regex match
        and again before building driver_args); they are now decoded
        exactly once.
        """
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # informixdb connects through a 'database@server' DSN.
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user, password=password, autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # informixdb rejects a trailing statement terminator; strip it.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] carries the SERIAL value generated by the last insert.
        return self.cursor.sqlerrd[1]
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine, which lacks the SKIP/FIRST
    pagination syntax; the limitby window is applied client-side via
    rowslice() instead. (work in progress)"""

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # SE cannot limit inside SQL: always emit the plain SELECT and
        # let rowslice() trim the fetched rows afterwards.
        return 'SELECT %s %s FROM %s%s%s;' % (
            sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        """Apply the limitby window in Python after fetching."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2, connected through pyodbc.

    The URI text after 'db2://' is handed to the driver verbatim as an
    ODBC connection string.
    """

    drivers = ('pyodbc',)

    # DAL field type -> DB2 column DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is pushed into SQL (FETCH FIRST n ROWS
        # ONLY); the lower bound is trimmed client-side by rowslice().
        if limitby:
            lmin, lmax = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal rendering overrides for blob and datetime values;
        every other field type returns None so the default
        representation is used."""
        if fieldtype == 'blob':
            encoded = base64.b64encode(str(obj))
            return "BLOB('%s')" % encoded
        if fieldtype == 'datetime':
            # DB2 timestamp literal format: yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store connection settings and prepare a connector callable."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Everything after the scheme is a raw ODBC connection string.
        odbc_string = uri.split('://', 1)[1]

        def connector(cnxn=odbc_string, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)

        self.connector = connector
        if do_connect:
            self.reconnect()

    def execute(self, command):
        # pyodbc/DB2 rejects a trailing statement terminator; strip it.
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        # IDENTITY_VAL_LOCAL() returns the identity value most recently
        # assigned in this session.
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self, rows, minimum=0, maximum=None):
        """Apply the lower limitby bound (and upper, if any) in Python."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata over pyodbc; the URI text after
    'teradata://' is passed to the driver verbatim as an ODBC
    connection string."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store connection settings and prepare a connector callable."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Everything after the scheme is a raw ODBC connection string.
        odbc_string = uri.split('://', 1)[1]

        def connector(cnxn=odbc_string, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)

        self.connector = connector
        if do_connect:
            self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is expressible in SQL, via TOP.
        if limitby:
            lmin, lmax = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata's idiom for emptying a table.
        return ['DELETE FROM %s ALL;' % (table._tablename,)]
# Placeholder sequence name substituted into the Ingres 'id'/'big-id' DDL
# templates below; IngresAdapter.create_sequence_and_triggers() replaces it
# with a real per-table sequence name ('<table>_iisq') before executing DDL.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                              # (ANSI-SQL wants this form of name
                              # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres, connected through pyodbc.

    The text after 'ingres://' is either a full ODBC connection string
    (detected by the presence of '=') or a bare local database name, in
    which case an ODBC string with the default 'Ingres' driver, the
    '(local)' vnode and OS authentication is built automatically.
    """

    drivers = ('pyodbc',)

    # DAL field type -> Ingres column DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby is a (min, max) slice: FIRST caps the row count and
        # OFFSET (Ingres 9.2+) skips the first lmin rows.
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse an ingres:// URI and prepare a connector callable."""
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): binds a module-level `pyodbc` name directly instead
        # of relying on find_driver(); confirm `pyodbc` is bound at module
        # level, otherwise this line raises NameError.
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            # NOTE(review): servertype is assigned but never used.
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            # NOTE(review): the hasattr check uses '_primarykey' but the join
            # reads 'table.primarykey' -- confirm both resolve on Table.
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # Replace the shared placeholder with a per-table sequence.
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        # Read the current value of the per-table sequence created in
        # create_sequence_and_triggers().
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
    """IngresAdapter variant that stores text in Unicode column types
    (NVARCHAR/NCLOB); all behavior is inherited, only the type map
    differs."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB using the sapdb driver.

    URI form: sapdb://user:password@host/db
    """
    drivers = ('sapdb',)

    support_distributed_transaction = False
    # DAL field type -> SAP DB column DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        # Name of the per-table sequence that feeds the id column.
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulates limit/offset by numbering rows with ROWNO in a derived
        # table and filtering on the computed w_row column.
        # NOTE(review): the interpolation order (sql_o before lmax, sql_o
        # repeated at the end) looks fragile -- verify the generated SQL
        # against a live MaxDB before relying on it.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                     % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a sapdb:// URI and prepare a connector callable."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            # sapdb exposes Connection(...) rather than connect(...).
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        # NOTE(review): NEXTVAL advances the sequence; confirm this matches
        # the id assigned by the preceding insert rather than skipping one.
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
class CubridAdapter(MySQLAdapter):
    """DAL adapter for CUBRID; reuses the MySQL SQL dialect with the
    cubriddb driver (default port 30000).

    URI form: cubrid://user:password@host[:port]/db[?set_encoding=...]
    """

    drivers = ('cubriddb',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a cubrid:// URI and prepare a connector callable.

        FIX: user/password were decoded with credential_decoder twice, and
        the re-decoded `passwd` local was computed but never used by the
        connector; credentials are now decoded exactly once and those
        decoded values are what the connector receives.
        """
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # NOTE(review): charset is parsed from the URI but never handed to
        # the driver; kept so the URI form stays documented/supported.
        charset = m.group('charset') or 'utf8'
        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Same session setup MySQLAdapter performs.
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object that stores its content in a 'web2py_filesystem'
    database table instead of on disk (used where the filesystem is not
    writable, e.g. Google App Engine with Cloud SQL).

    Supports the minimal read/readline/write/close protocol the DAL's
    migration machinery needs; content is buffered in memory (self.data)
    with a cursor position (self.p) and flushed on close.
    """

    # Class-level flag: True once the backing table is known to exist.
    web2py_filesystem = False

    def escape(self,obj):
        # Delegate SQL escaping to the owning adapter.
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        """Open `filename` in `mode` ('r', 'w', 'rw' or 'a'), creating the
        web2py_filesystem table on first use.

        Raises RuntimeError for unsupported engines or when a file opened
        for reading does not exist in the table or on disk.
        """
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            # Remember table creation on the class so it runs only once.
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0
        self.data = ''
        if mode in ('r','rw','a'):
            # NOTE(review): filename is interpolated into SQL unescaped;
            # callers pass internal .table paths, but confirm no untrusted
            # names can reach here.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # Fall back to an on-disk copy if the row is missing.
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        # Return up to `bytes` characters from the cursor and advance it.
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        # Return text up to and including the next newline, or the rest
        # of the buffer if no newline remains.
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # Appends only; writes are flushed to the table on close.
        self.data += data

    def close_connection(self):
        """Flush the buffer to the table (delete + insert) and commit;
        idempotent because self.db is cleared afterwards."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            # Only single quotes are escaped (doubled) in the content.
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if `filename` exists on disk or in the table.

        Operational/programming errors from the query (e.g. table missing)
        are logged and treated as 'does not exist'; anything else re-raises.
        """
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception, e:
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's migration-file I/O to the
    'web2py_filesystem' table (via DatabaseStoredFile) instead of the
    local disk.  Expects the host class to provide self.db."""

    def file_exists(self, filename):
        # Delegate the existence check to the database-backed store.
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        # `lock` is accepted for interface compatibility but unused here.
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self, filename):
        """Remove the stored file's row and commit."""
        sql = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(sql)
        self.db.commit()
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """DAL adapter for Google Cloud SQL on App Engine.

    Speaks the MySQL dialect through GAE's `rdbms` module and stores
    migration .table files in the database (UseDatabaseStoredFile),
    since GAE has no writable filesystem.

    URI form: google:sql://instance/database
    """
    uploads_in_blob = True

    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the google:sql URI and prepare a connector callable.

        adapter_args['createdb'] (default True) controls whether
        after_connection() creates/selects the database itself or the
        database name is passed straight to the driver.
        """

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # Derive a pseudo working folder from the current application path.
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                                          os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            # `rdbms` is the App Engine-provided Cloud SQL driver module.
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Optionally create/select the target database, then apply the
        # same session setup MySQLAdapter uses.
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # Python 2: commands are byte strings; decode to unicode for rdbms.
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self,adapter_args,uri=None):
        # No importable driver module to probe; the connector uses `rdbms`.
        self.adapter_args = adapter_args
        self.driver = "google"
class NoSQLAdapter(BaseAdapter):
    """Common base for the non-relational adapters (Google datastore,
    MongoDB, CouchDB, IMAP, ...).

    Provides NoSQL-appropriate value coercion via represent(), turns the
    transaction API into no-ops, and makes every SQL-generation hook
    raise SyntaxError, since subclasses build native queries instead of
    SQL strings.
    """
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        # Python 2 text normalization: byte strings are utf8-decoded,
        # non-text objects go through unicode().
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        # A query matching every record of `table` (ids start at 1).
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Coerce `obj` to the native Python value expected for
        `fieldtype` (int, float, datetime, list, unicode, ...), unlike
        the SQL adapters which return SQL literal strings.

        Returns None for an empty string on non-string-ish field types.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            # Values for raw gae.Property field types pass through as-is.
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            # Normalize list: fields to an actual list.
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            # Empty string means NULL except for string-ish field types
            # (string/text/password/upload).
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # A list for a scalar type: represent each element.
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                # References store the referenced record's integer id.
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # Truthy unless the value's text starts with '0' or 'F'.
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # Parse 'YYYY-MM-DD' text.
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # datetime is a date subclass; truncate to a date.
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # Parse 'HH:MM[:SS]' text, defaulting seconds to 0.
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # Parse 'YYYY-MM-DD[ HH:MM:SS]' text, padding missing
                    # time components with zeros.
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype == 'json':
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The _-prefixed builders normally return SQL; here they return a
    # human-readable description used only for db._lastsql / logging.
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
class GAEF(object):
    """A single primitive datastore filter: a field name, a comparison
    operator, the comparison value, and a callable that applies the same
    comparison in-process (used when results are already a plain list).
    """

    def __init__(self, name, op, value, apply):
        # web2py's pseudo-field 'id' maps onto the datastore key
        if name == 'id':
            name = '__key__'
        self.name = name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (
            self.name, self.op, repr(self.value), type(self.value))
4602
class GoogleDatastoreAdapter(NoSQLAdapter):
    """
    DAL adapter for the Google App Engine datastore (``google:datastore``).

    Queries are compiled into lists of :class:`GAEF` filter objects rather
    than SQL strings, and selects are executed through ``gae.Query``.
    """
    uploads_in_blob = True
    types = {}

    # the datastore has no filesystem; migration-file hooks are no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # extracts an optional namespace from a uri like "google:datastore://ns"
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE: self.types is a *class* attribute, so this update is shared
        # by every instance; kept as-is for backward compatibility.
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = 0  # the datastore has no connection pool
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))

    def parse_id(self, value, field_type):
        # datastore ids are already usable integers; no coercion needed
        return value

    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build the GAE Model class backing *table* and store it on
        ``table._tableobj``.  *polymodel* may be None (plain Model), True
        (PolyModel base) or a Table whose model should be subclassed."""
        myfields = {}
        for field in table:
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue  # inherited from the parent poly-model
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                # this is custom properties to add to the GAE field declaration
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                ftype = field_type
            elif field_type.startswith('id'):
                continue  # the id is the datastore key, not a property
            elif field_type.startswith('decimal'):
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    # NOTE(review): this replaces any custom_qualifier attrs;
                    # the list:reference branch below merges instead — confirm
                    # whether the overwrite is intentional before changing it.
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None

    def expand(self,expression,field_type=None):
        """Recursively expand a Field/Query/Expression into either a field
        name, a list of GAEF filters, or a represented literal value."""
        if isinstance(expression,Field):
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self,first,second):
        # concatenate the two filter lists; key filters must come first
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name=='__key__' and a[0].name!='__key__':
            return b+a
        return a+b

    def EQ(self,first,second=None):
        if isinstance(second, Key):
            return [GAEF(first.name,'=',second,lambda a,b:a==b)]
        return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]

    def NE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
        else:
            if not second is None:
                second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]

    def LT(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<',second,lambda a,b:a<b)]

    def LE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]

    def GT(self,first,second=None):
        # id>0 is the canonical "all records" query; keep it as a plain
        # comparison so select_raw can drop it instead of building a Key
        if first.type != 'id' or second==0 or second == '0':
            return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>',second,lambda a,b:a>b)]

    def GE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]

    def INVERT(self,first):
        # descending sort marker for orderby
        return '-%s' % first.name

    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))

    def BELONGS(self,first,second=None):
        if not isinstance(second,(list, tuple)):
            raise SyntaxError("Not supported")
        if first.type != 'id':
            return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
        else:
            second = [Key.from_path(first._tablename, int(i)) for i in second]
            return [GAEF(first.name,'in',second,lambda a,b:a in b)]

    def CONTAINS(self,first,second,case_sensitive=False):
        # silently ignoring: GAE can only do case sensitive matches!
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]

    def NOT(self,first):
        """Negate a simple comparison query by swapping its operator.
        Only the six basic comparisons can be negated on GAE."""
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError("Not supported")
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError("Not supported %s" % first.op.__name__)
        first.op = nop
        return self.expand(first)

    def truncate(self,table,mode):
        # GAE has no truncate; delete every record matching the id query
        self.db(self.db._adapter.id_query(table)).delete()

    def select_raw(self,query,fields=None,attributes=None):
        """Run *query* and return (items, tablename, fieldnames).
        *items* is either a gae.Query cursor or a plain list when the
        query resolved to at most one record by key."""
        db = self.db
        fields = fields or []
        attributes = attributes or {}
        args_get = attributes.get
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = db._adapter.id_query(fields[0].table)
        else:
            raise SyntaxError("Unable to determine a tablename")

        if query:
            if use_common_filters(query):
                query = self.common_filter(query,[tablename])

        #tableobj is a GAE Model class (or subclass)
        tableobj = db[tablename]._tableobj
        filters = self.expand(query)

        projection = None
        if len(db[tablename].fields) == len(fields):
            #getting all fields, not a projection query
            projection = None
        elif args_get('projection') == True:
            projection = []
            for f in fields:
                if f.type in ['text', 'blob', 'json']:
                    raise SyntaxError(
                        "text and blob field types not allowed in projection queries")
                else:
                    projection.append(f.name)
        elif args_get('filterfields') == True:
            projection = []
            for f in fields:
                projection.append(f.name)

        # real projection's can't include 'id'.
        # it will be added to the result later
        query_projection = [
            p for p in projection if \
                p != db[tablename]._id.name] if projection and \
                args_get('projection') == True\
                else None

        cursor = None
        if isinstance(args_get('reusecursor'), str):
            cursor = args_get('reusecursor')
        items = gae.Query(tableobj, projection=query_projection,
                          cursor=cursor)

        for filter in filters:
            if args_get('projection') == True and \
               filter.name in query_projection and \
               filter.op in ['=', '<=', '>=']:
                raise SyntaxError(
                    "projection fields cannot have equality filters")
            if filter.name=='__key__' and filter.op=='>' and filter.value==0:
                continue  # id>0 means "everything": no filter needed
            elif filter.name=='__key__' and filter.op=='=':
                if filter.value==0:
                    items = []
                elif isinstance(filter.value, Key):
                    # key queries return a class instance,
                    # can't use projection
                    # extra values will be ignored in post-processing later
                    item = tableobj.get(filter.value)
                    items = (item and [item]) or []
                else:
                    # key queries return a class instance,
                    # can't use projection
                    # extra values will be ignored in post-processing later
                    item = tableobj.get_by_id(filter.value)
                    items = (item and [item]) or []
            elif isinstance(items,list): # i.e. there is a single record!
                # BUGFIX: the comprehension previously read
                # getattr(item, ...) — the stale variable from the key
                # branch — instead of the record being filtered.
                items = [i for i in items if filter.apply(
                        getattr(i,filter.name),filter.value)]
            else:
                if filter.name=='__key__' and filter.op != 'in':
                    items.order('__key__')
                items = items.filter('%s %s' % (filter.name,filter.op),
                                     filter.value)
        if not isinstance(items,list):
            if args_get('left', None):
                raise SyntaxError('Set: no left join in appengine')
            if args_get('groupby', None):
                raise SyntaxError('Set: no groupby in appengine')
            orderby = args_get('orderby', False)
            if orderby:
                ### THIS REALLY NEEDS IMPROVEMENT !!!
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby,Expression):
                    orderby = self.expand(orderby)
                orders = orderby.split(', ')
                for order in orders:
                    order={'-id':'-__key__','id':'__key__'}.get(order,order)
                    items = items.order(order)
            if args_get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                (limit, offset) = (lmax - lmin, lmin)
                rows = items.fetch(limit,offset=offset)
                #cursor is only useful if there was a limit and we didn't return
                # all results
                if args_get('reusecursor'):
                    db['_lastcursor'] = items.cursor()
                items = rows
        return (items, tablename, projection or db[tablename].fields)

    def select(self,query,fields,attributes):
        """
        This is the GAE version of select. some notes to consider:
         - db['_lastsql'] is not set because there is not SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries. note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'filterfields' when set to True web2py will only
           parse the explicitly listed fields into the Rows object, even though
           all fields are returned in the query. This can be used to reduce
           memory usage in cases where true projection queries are not
           usable.
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute. Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query. The user must save the cursor value between
           requests, and the filters must be identical. It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def count(self,query,distinct=None,limit=None):
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            return len(items)
        except TypeError:
            # items is a gae.Query, not a list
            return items.count(limit=limit)

    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            #use a keys_only query to ensure that this runs as a datastore
            # small operations
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        return counter

    def update(self,tablename,query,update_fields):
        """Apply *update_fields* to every record matching *query*;
        returns the number of records updated."""
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        LOGGER.info(str(counter))
        return counter

    def insert(self,table,fields):
        """Insert one record; returns a Reference carrying the new key."""
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
        return rid

    def bulk_insert(self,table,items):
        parsed_items = []
        for item in items:
            dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
            parsed_items.append(table._tableobj(**dfields))
        gae.put(parsed_items)
        return True
5020
def uuid2int(uuidv):
    """Return the 128-bit integer encoded by the UUID string *uuidv*."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
5023
def int2uuid(n):
    """Inverse of uuid2int: render integer *n* as a canonical UUID string."""
    value = uuid.UUID(int=n)
    return str(value)
5026
class CouchDBAdapter(NoSQLAdapter):
    """
    DAL adapter for CouchDB (couchdb:// uris).

    Queries are compiled into JavaScript map functions and executed as
    temporary views via ``couchdb`` driver ``Database.query``.
    """
    drivers = ('couchdb',)

    uploads_in_blob = True
    # Python-side storage types for each DAL field type
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # CouchDB needs no migration files; filesystem hooks are no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self,expression,field_type=None):
        # the 'id' pseudo-field maps onto CouchDB's reserved _id key
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    # boolean operators are rendered as JavaScript, not SQL
    def AND(self,first,second):
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        """Serialize a Python value into its JavaScript/JSON literal form
        for embedding in a map function."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(long(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # everything else: a quoted utf8 string literal
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the "couchdb://" prefix and talk plain HTTP to the server
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # best-effort: creating an already-existing database is ignored
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        # record ids are client-generated UUIDs stored as integers
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        """Compile *query*/*fields* into a JavaScript map function string;
        returns (fn, colnames)."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # the 'id' field is stored under CouchDB's _id key
            return fd=='id' and '_id' or fd
        def get(row,fd):
            # NOTE(review): defined but not used in this method
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        # run the map function as a temporary view
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def delete(self,tablename,query):
        """Delete matching records; a direct delete-by-id avoids running
        a view.  Returns the number of records removed."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: delete a single document by its _id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        """Update matching records; like delete(), updating by id skips
        the view query.  Returns the number of records updated."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        # implemented by materializing the matching ids and counting them
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
5218
def cleanup(text):
    """Validate that *text* is a safe identifier (only [0-9a-zA-Z_],
    per REGEX_ALPHANUMERIC) and return it unchanged; raise SyntaxError
    otherwise."""
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
5226
5227 -class MongoDBAdapter(NoSQLAdapter):
5228 native_json = True 5229 drivers = ('pymongo',) 5230 5231 uploads_in_blob = True 5232 5233 types = { 5234 'boolean': bool, 5235 'string': str, 5236 'text': str, 5237 'json': str, 5238 'password': str, 5239 'blob': str, 5240 'upload': str, 5241 'integer': long, 5242 'bigint': long, 5243 'float': float, 5244 'double': float, 5245 'date': datetime.date, 5246 'time': datetime.time, 5247 'datetime': datetime.datetime, 5248 'id': long, 5249 'reference': long, 5250 'list:string': list, 5251 'list:integer': list, 5252 'list:reference': list, 5253 } 5254 5255 error_messages = {"javascript_needed": "This must yet be replaced" + 5256 " with javascript in order to work."} 5257
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the mongodb:// uri, record driver helpers (SON, ObjectId)
        on the instance and register a connector with the pool."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        # keep driver classes on the instance so other methods need no imports
        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performand asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)

        # older pymongo versions return a tuple instead of a dict
        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri,m=m):
            # Connection() is deprecated; prefer MongoClient when available
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector,cursor=False)
5306
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance.

        Accepts an existing ObjectId (returned as-is), an integer, a
        decimal or base-16 string, or the literal "<random>" which yields
        a pseudo-random (NOT guaranteed unique) ObjectId.  None/0 map to
        the all-zero ObjectId.

        self.object_id("<random>") -> ObjectId (not unique) instance """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # 24 random hex digits -> 96-bit pseudo-random id
                arg = int("0x%sL" % \
                "".join([self.random.choice("0123456789abcdef") \
                for x in range(24)]), 0)
            elif arg.isalnum():
                # treat as hexadecimal, adding the 0x prefix if missing
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                            "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            # 24 zero hex digits: the null ObjectId
            hexvalue = "".zfill(24)
        else:
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
5344
    def parse_reference(self, value, field_type):
        """Parse a reference value coming back from MongoDB: an ObjectId
        is first converted to its integer form, then handed to the base
        adapter's parser."""
        # here we have to check for ObjectID before base parse
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_reference(value, field_type)
5351
    def parse_id(self, value, field_type):
        """Parse an id value: like parse_reference, ObjectIds become
        integers before the base adapter's parsing runs."""
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_id(value, field_type)
5357
    def represent(self, obj, fieldtype):
        """Convert a Python value into its MongoDB storage form for the
        given DAL *fieldtype*."""
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be convert to ObjectID
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't has a date object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped of based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't has a  time object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(d, value)
        elif fieldtype == "blob":
            from bson import Binary
            if not isinstance(value, Binary):
                return Binary(value)
            return value
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
               (isinstance(fieldtype, Table)) or fieldtype=="id"):
            value = self.object_id(value)
        return value
5399
5400 - def create_table(self, table, migrate=True, fake_migrate=False, 5401 polymodel=None, isCapped=False):
5402 if isCapped: 5403 raise RuntimeError("Not implemented")
5404
    def count(self, query, distinct=None, snapshot=True):
        """Return the number of records matching *query* by delegating to
        select(count=True); DISTINCT counting is not supported."""
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return long(self.select(query,[self.db[tablename]._id], {},
                                count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefor call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Recursively expand a Field/Query/Expression into the value used
        to build a pymongo query document.

        NOTE(review): the first block below mutates expression.first/second
        (renaming 'id' to '_id' and casting to ObjectId) and computes a
        result that the later `elif isinstance(expression, (Expression,
        Query))` branch recomputes — the mutation is the lasting effect.
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            #   set name as _id (as per pymongo/mongodb primary key)
            #   convert second arg to an objectid field
            #   (if its not already)
            #   if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result =  expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5460
5461 - def drop(self, table, mode=''):
5462 ctable = self.connection[table._tablename] 5463 ctable.drop()
5464
5465 - def truncate(self, table, mode, safe=None):
5466 if safe == None: 5467 safe=self.safe 5468 ctable = self.connection[table._tablename] 5469 ctable.remove(None, safe=True)
5470
    def _select(self, query, fields, attributes):
        """Translate a DAL query and field list into the pieces of a
        pymongo find() call; returns (tablename, query dict, field dict,
        sort list, limit, skip).  Only 'limitby' and 'orderby' attributes
        are implemented; others are warned about and ignored."""
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' on a field name means descending order
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0

        # SON preserves insertion order of the projected fields
        mongofields_dict = self.SON()
        mongoqry_dict = {}
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
5523
def select(self, query, fields, attributes, count=False,
           snapshot=False):
    """Run a translated find() against MongoDB and return web2py rows.

    count: when True return {'count': n} instead of rows.
    snapshot: passed through to pymongo's find().
    """
    # TODO: support joins
    tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
    limitby_limit, limitby_skip = self._select(query, fields, attributes)
    ctable = self.connection[tablename]

    if count:
        return {'count' : ctable.find(
                mongoqry_dict, mongofields_dict,
                skip=limitby_skip, limit=limitby_limit,
                sort=mongosort_list, snapshot=snapshot).count()}
    else:
        # pymongo cursor object
        mongo_list_dicts = ctable.find(mongoqry_dict,
                            mongofields_dict, skip=limitby_skip,
                            limit=limitby_limit, sort=mongosort_list,
                            snapshot=snapshot)
        rows = []
        # populate row in proper order
        # Here we replace ._id with .id to follow the standard naming
        colnames = []
        newnames = []
        for field in fields:
            colname = str(field)
            colnames.append(colname)
            tablename, fieldname = colname.split(".")
            if fieldname == "_id":
                # Mongodb reserved uuid key
                # NOTE(review): this mutates the shared Field object's
                # name in place -- confirm callers tolerate that
                field.name = "id"
            newnames.append(".".join((tablename, field.name)))

        for record in mongo_list_dicts:
            row=[]
            for colname in colnames:
                tablename, fieldname = colname.split(".")
                # switch to Mongo _id uuids for retrieving
                # record id's
                if fieldname == "id": fieldname = "_id"
                if fieldname in record:
                    value = record[fieldname]
                else:
                    value = None
                row.append(value)
            rows.append(row)

        processor = attributes.get('processor', self.parse)
        result = processor(rows, fields, newnames, False)
        return result
5573
5574 - def _insert(self, table, fields):
5575 values = dict() 5576 for k, v in fields: 5577 if not k.name in ["id", "safe"]: 5578 fieldname = k.name 5579 fieldtype = table[k.name].type 5580 values[fieldname] = self.represent(v, fieldtype) 5581 return values
# Safe determines whether an asynchronous request is done or a
# synchronous action is done
# For safety, we use by default synchronous requests
def insert(self, table, fields, safe=None):
    """Insert one document; returns the new record id as a long.

    safe: write-concern flag; defaults to the adapter-wide self.safe.
    """
    if safe==None:
        safe = self.safe
    ctable = self.connection[table._tablename]
    values = self._insert(table, fields)
    # pymongo's insert adds the generated ObjectId to `values` as '_id'
    ctable.insert(values, safe=safe)
    # the ObjectId's hex string is converted to the integer id callers expect
    return long(str(values['_id']), 16)
def _update(self, tablename, query, fields):
    """Return the (modify, filter) pair for a pymongo update call.

    modify is a {'$set': {...}} document; filter is the expanded where
    clause (None when the query is falsy).
    """
    if not isinstance(query, Query):
        raise SyntaxError("Not Supported")
    where = self.expand(query) if query else None
    # do not try to update id fields to avoid backend errors
    updates = dict((f.name, self.represent(v, f.type))
                   for f, v in fields if f.name not in ("_id", "id"))
    return {'$set': updates}, where
5605
def update(self, tablename, query, fields, safe=None):
    """Update all documents matching *query*; returns the affected count.

    safe: write-concern flag; defaults to the adapter-wide self.safe.
    With safe writes the server-reported count is used; otherwise the
    pre-counted match total is returned.
    """
    if safe == None:
        safe = self.safe
    # return amount of adjusted rows or zero, but no exceptions
    # @ related not finding the result
    if not isinstance(query, Query):
        raise RuntimeError("Not implemented")
    # count up front: an unsafe update gives no feedback from the server
    amount = self.count(query, False)
    modify, filter = self._update(tablename, query, fields)
    try:
        result = self.connection[tablename].update(filter,
                   modify, multi=True, safe=safe)
        if safe:
            try:
                # if result count is available fetch it
                return result["n"]
            except (KeyError, AttributeError, TypeError):
                return amount
        else:
            return amount
    except Exception, e:
        # TODO Reverse the update query to verify that it succeeded
        raise RuntimeError("uncaught exception when updating rows: %s" % e)
5629
def _delete(self, tablename, query):
    """Expand *query* into the pymongo filter document used by remove()."""
    if isinstance(query, Query):
        return self.expand(query)
    raise RuntimeError("query type %s is not supported" % type(query))
5635
def delete(self, tablename, query, safe=None):
    """Delete all documents matching *query*; returns the number removed.

    safe: write-concern flag; defaults to the adapter-wide self.safe.
    Fix: removed the dead store ``amount = 0`` that was immediately
    overwritten by the count() call.
    """
    if safe is None:
        safe = self.safe
    # count first: pymongo's remove() does not report how many went away
    amount = self.count(query, False)
    filter = self._delete(tablename, query)
    self.connection[tablename].remove(filter, safe=safe)
    return amount
5644
def bulk_insert(self, table, items):
    """Insert each item in turn; returns the list of new record ids."""
    results = []
    for item in items:
        results.append(self.insert(table, item))
    return results
## OPERATORS

def INVERT(self, first):
    """Unary minus for orderby: mark the expanded field for descending sort."""
    expanded = self.expand(first)
    return '-%s' % expanded
# TODO This will probably not work:(

def NOT(self, first):
    """Negate an expanded query with mongo's $not operator."""
    return {"$not": self.expand(first)}
5658
def AND(self, first, second):
    """Conjunction: merge both expanded query dicts (mongo's implicit AND).

    Note: a key present in both operands is taken from *second*.
    """
    combined = self.expand(first)
    combined.update(self.expand(second))
    return combined
5664
def OR(self, first, second):
    """Disjunction via mongo's $or.

    pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
    """
    return {'$or': [self.expand(first), self.expand(second)]}
5672
def BELONGS(self, first, second):
    """IN-list membership test via mongo's $in operator.

    second may be a raw string, an (possibly empty) iterable of values,
    or empty, in which case an always-false filter is returned.
    """
    if isinstance(second, str):
        # NOTE(review): second[:-1] drops the trailing character --
        # presumably a ')' from a nested-select string; confirm against
        # the callers that pass strings here
        return {self.expand(first) : {"$in" : [ second[:-1]]} }
    elif second==[] or second==() or second==set():
        # no candidate values: a filter document that matches nothing
        return {1:0}
    items = [self.expand(item, first.type) for item in second]
    return {self.expand(first) : {"$in" : items} }
5680
def EQ(self, first, second=None):
    """Equality test: {field: value}."""
    return {self.expand(first): self.expand(second)}
5685
def NE(self, first, second=None):
    """Inequality test: {field: {'$ne': value}}."""
    return {self.expand(first): {'$ne': self.expand(second)}}
5690
def LT(self, first, second=None):
    """Less-than test: {field: {'$lt': value}}; None is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return {self.expand(first): {'$lt': self.expand(second)}}
5697
def LE(self, first, second=None):
    """Less-or-equal test: {field: {'$lte': value}}; None is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return {self.expand(first): {'$lte': self.expand(second)}}
5704
def GT(self, first, second=None):
    """Greater-than test: {field: {'$gt': value}}.

    Fix (consistency): reject None exactly like the sibling LT/LE/GE
    operators do -- a $gt None comparison is meaningless; the added
    second=None default keeps the signature parallel with them and is
    backward compatible.
    """
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    return {self.expand(first): {'$gt': self.expand(second)}}
5709
def GE(self, first, second=None):
    """Greater-or-equal test: {field: {'$gte': value}}; None is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return {self.expand(first): {'$gte': self.expand(second)}}
5716
def ADD(self, first, second):
    """Arithmetic '+' is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5721
def SUB(self, first, second):
    """Arithmetic '-' is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5726
def MUL(self, first, second):
    """Arithmetic '*' is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5731
def DIV(self, first, second):
    """Arithmetic '/' is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5736
def MOD(self, first, second):
    """Arithmetic '%' is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5741
def AS(self, first, second):
    """Column aliasing is not supported by the MongoDB adapter.

    Always raises NotImplementedError (it would need server-side
    JavaScript).  Fix: dropped the unreachable SQL-string return that
    followed the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
# We could implement an option that simulates a full featured SQL
# database. But I think the option should be set explicit or
# implemented as another library.
def ON(self, first, second):
    """JOIN ... ON is not possible in MongoDB; always raises.

    Fix: dropped the unreachable SQL-string return that followed the raise.
    """
    raise NotImplementedError("This is not possible in NoSQL" +
                              " but can be simulated with a wrapper.")
# BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
# WHICH ONE IS BEST?
def COMMA(self, first, second):
    """Join two expanded expressions with ', ' (SQL-style list)."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
5759
def LIKE(self, first, second):
    """Dead code: shadowed by the later LIKE redefinition in this class.

    This earlier version replaced '%' with '/' instead of a regex
    wildcard; only the second definition is actually bound at runtime.
    """
    #escaping regex operators?
    return {self.expand(first): ('%s' % \
        self.expand(second, 'string').replace('%','/'))}
5764
def STARTSWITH(self, first, second):
    """Dead code: shadowed by the later STARTSWITH redefinition below.

    This version also does not escape regex metacharacters in the value.
    """
    #escaping regex operators?
    return {self.expand(first): ('/^%s/' % \
        self.expand(second, 'string'))}
5769
def ENDSWITH(self, first, second):
    """Dead code: shadowed by the later ENDSWITH redefinition below.

    Note the anchor here looks wrong anyway ('^' appears at the end of
    the pattern); the later definition uses a proper '$' anchor.
    """
    #escaping regex operators?
    return {self.expand(first): ('/%s^/' % \
        self.expand(second, 'string'))}
5774
def CONTAINS(self, first, second, case_sensitive=False):
    """Dead code: shadowed by the later CONTAINS redefinition below.

    Unlike the later one, this version passes an ObjectId value through
    unchanged instead of wrapping it in a regex.
    """
    # silently ignore, only case sensitive
    # There is a technical difference, but mongodb doesn't support
    # that, but the result will be the same
    val = second if isinstance(second,self.ObjectId) else \
        {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
    return {self.expand(first) : val}
5782
def LIKE(self, first, second):
    """SQL LIKE translated to a mongo $regex ('%' wildcard -> '.*').

    Fix: split the pattern on '%', escape each literal piece, then join
    with '.*'.  The original escaped the whole string first and then
    replaced '%' -- under Python 2 re.escape escapes '%' too, so the
    replacement corrupted the pattern into a backslash-dot-star sequence.
    TODO: the '_' single-character wildcard is still not handled.
    """
    import re
    pattern = self.expand(second, 'string')
    regex = '.*'.join(re.escape(part) for part in pattern.split('%'))
    return {self.expand(first): {'$regex': regex}}
#TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
    """Prefix match: '^'-anchored, escaped $regex on the expanded field."""
    #TODO Solve almost the same problem as with endswith
    import re
    pattern = '^' + re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': pattern}}
#TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
    """Suffix match: '$'-anchored, escaped $regex on the expanded field."""
    #TODO if searched for a name like zsa_corbitt and the function
    # is endswith('a') then this is also returned.
    # Aldo it end with a t
    import re
    pattern = re.escape(self.expand(second, 'string')) + '$'
    return {self.expand(first): {'$regex': pattern}}
#TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring match via an unanchored $regex.

    case_sensitive is silently ignored: the regex here is always case
    sensitive, which mongodb-side is accepted as close enough.
    """
    #TODO contains operators need to be transformed to Regex
    pattern = ".*" + re.escape(self.expand(second, 'string')) + ".*"
    return {self.expand(first): {'$regex': pattern}}
5815
5816 5817 -class IMAPAdapter(NoSQLAdapter):
5818 drivers = ('imaplib',) 5819 5820 """ IMAP server adapter 5821 5822 This class is intended as an interface with 5823 email IMAP servers to perform simple queries in the 5824 web2py DAL query syntax, so email read, search and 5825 other related IMAP mail services (as those implemented 5826 by brands like Google(r), and Yahoo!(r) 5827 can be managed from web2py applications. 5828 5829 The code uses examples by Yuji Tomita on this post: 5830 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5831 and is based in docs for Python imaplib, python email 5832 and email IETF's (i.e. RFC2060 and RFC3501) 5833 5834 This adapter was tested with a small set of operations with Gmail(r). Other 5835 services requests could raise command syntax and response data issues. 5836 5837 It creates its table and field names "statically", 5838 meaning that the developer should leave the table and field 5839 definitions to the DAL instance by calling the adapter's 5840 .define_tables() method. The tables are defined with the 5841 IMAP server mailbox list information. 
5842 5843 .define_tables() returns a dictionary mapping dal tablenames 5844 to the server mailbox names with the following structure: 5845 5846 {<tablename>: str <server mailbox name>} 5847 5848 Here is a list of supported fields: 5849 5850 Field Type Description 5851 ################################################################ 5852 uid string 5853 answered boolean Flag 5854 created date 5855 content list:string A list of dict text or html parts 5856 to string 5857 cc string 5858 bcc string 5859 size integer the amount of octets of the message* 5860 deleted boolean Flag 5861 draft boolean Flag 5862 flagged boolean Flag 5863 sender string 5864 recent boolean Flag 5865 seen boolean Flag 5866 subject string 5867 mime string The mime header declaration 5868 email string The complete RFC822 message** 5869 attachments <type list> Each non text part as dict 5870 encoding string The main detected encoding 5871 5872 *At the application side it is measured as the length of the RFC822 5873 message string 5874 5875 WARNING: As row id's are mapped to email sequence numbers, 5876 make sure your imap client web2py app does not delete messages 5877 during select or update actions, to prevent 5878 updating or deleting different messages. 5879 Sequence numbers change whenever the mailbox is updated. 5880 To avoid this sequence numbers issues, it is recommended the use 5881 of uid fields in query references (although the update and delete 5882 in separate actions rule still applies). 
5883 5884 # This is the code recommended to start imap support 5885 # at the app's model: 5886 5887 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5888 imapdb.define_tables() 5889 5890 Here is an (incomplete) list of possible imap commands: 5891 5892 # Count today's unseen messages 5893 # smaller than 6000 octets from the 5894 # inbox mailbox 5895 5896 q = imapdb.INBOX.seen == False 5897 q &= imapdb.INBOX.created == datetime.date.today() 5898 q &= imapdb.INBOX.size < 6000 5899 unread = imapdb(q).count() 5900 5901 # Fetch last query messages 5902 rows = imapdb(q).select() 5903 5904 # it is also possible to filter query select results with limitby and 5905 # sequences of mailbox fields 5906 5907 set.select(<fields sequence>, limitby=(<int>, <int>)) 5908 5909 # Mark last query messages as seen 5910 messages = [row.uid for row in rows] 5911 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5912 5913 # Delete messages in the imap database that have mails from mr. 
Gumby 5914 5915 deleted = 0 5916 for mailbox in imapdb.tables 5917 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5918 5919 # It is possible also to mark messages for deletion instead of ereasing them 5920 # directly with set.update(deleted=True) 5921 5922 5923 # This object give access 5924 # to the adapter auto mailbox 5925 # mapped names (which native 5926 # mailbox has what table name) 5927 5928 imapdb.mailboxes <dict> # tablename, server native name pairs 5929 5930 # To retrieve a table native mailbox name use: 5931 imapdb.<table>.mailbox 5932 5933 ### New features v2.4.1: 5934 5935 # Declare mailboxes statically with tablename, name pairs 5936 # This avoids the extra server names retrieval 5937 5938 imapdb.define_tables({"inbox": "INBOX"}) 5939 5940 # Selects without content/attachments/email columns will only 5941 # fetch header and flags 5942 5943 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5944 """ 5945 5946 types = { 5947 'string': str, 5948 'text': str, 5949 'date': datetime.date, 5950 'datetime': datetime.datetime, 5951 'id': long, 5952 'boolean': bool, 5953 'integer': int, 5954 'bigint': long, 5955 'blob': str, 5956 'list:string': str, 5957 } 5958 5959 dbengine = 'imap' 5960 5961 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5962
def __init__(self,
             db,
             uri,
             pool_size=0,
             folder=None,
             db_codec ='UTF-8',
             credential_decoder=IDENTITY,
             driver_args={},
             adapter_args={},
             do_connect=True,
             after_connection=None):
    """Configure the IMAP adapter and (when do_connect) open a connection.

    db uri: user@example.com:password@imap.server.com:123
    Port 993 switches the driver class to IMAP4_SSL.
    """
    # TODO: max size adapter argument for preventing large mail transfers

    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    self.pool_size=pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.credential_decoder = credential_decoder
    self.driver_args = driver_args
    self.adapter_args = adapter_args
    self.mailbox_size = None
    self.static_names = None
    self.charset = sys.getfilesystemencoding()
    # imap class
    self.imap4 = None
    uri = uri.split("://")[1]

    """ MESSAGE is an identifier for sequence number"""

    self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
                  'flagged': '\\Flagged', 'recent': '\\Recent',
                  'seen': '\\Seen', 'answered': '\\Answered'}
    self.search_fields = {
        'id': 'MESSAGE', 'created': 'DATE',
        'uid': 'UID', 'sender': 'FROM',
        'to': 'TO', 'cc': 'CC',
        'bcc': 'BCC', 'content': 'TEXT',
        'size': 'SIZE', 'deleted': '\\Deleted',
        'draft': '\\Draft', 'flagged': '\\Flagged',
        'recent': '\\Recent', 'seen': '\\Seen',
        'subject': 'SUBJECT', 'answered': '\\Answered',
        'mime': None, 'email': None,
        'attachments': None
        }

    db['_lastsql'] = ''

    m = self.REGEX_URI.match(uri)
    user = m.group('user')
    password = m.group('password')
    host = m.group('host')
    port = int(m.group('port'))
    over_ssl = False
    if port==993:
        over_ssl = True

    driver_args.update(host=host,port=port, password=password, user=user)
    def connector(driver_args=driver_args):
        # it is assumed successful authentication always
        # TODO: support direct connection and login tests
        if over_ssl:
            self.imap4 = self.driver.IMAP4_SSL
        else:
            self.imap4 = self.driver.IMAP4
        connection = self.imap4(driver_args["host"], driver_args["port"])
        data = connection.login(driver_args["user"], driver_args["password"])

        # static mailbox list
        connection.mailbox_names = None

        # dummy cursor function
        connection.cursor = lambda : True

        return connection

    self.db.define_tables = self.define_tables
    self.connector = connector
    if do_connect: self.reconnect()
6046
def reconnect(self, f=None, cursor=True):
    """
    IMAP4 pool connection method.

    The imap connection lacks a native cursor command, so a custom
    replacement is provided for connection pooling to prevent an
    uncaught remote session closing.
    """
    if getattr(self,'connection',None) != None:
        return
    if f is None:
        f = self.connector

    if not self.pool_size:
        self.connection = f()
        self.cursor = cursor and self.connection.cursor()
    else:
        POOLS = ConnectionPool.POOLS
        uri = self.uri
        while True:
            GLOBAL_LOCKER.acquire()
            if not uri in POOLS:
                POOLS[uri] = []
            if POOLS[uri]:
                self.connection = POOLS[uri].pop()
                GLOBAL_LOCKER.release()
                self.cursor = cursor and self.connection.cursor()
                if self.cursor and self.check_active_connection:
                    try:
                        # check if connection is alive or close it
                        result, data = self.connection.list()
                    except:
                        # Possible connection reset error
                        # TODO: read exception class
                        self.connection = f()
                break
            else:
                GLOBAL_LOCKER.release()
                self.connection = f()
                self.cursor = cursor and self.connection.cursor()
                break
    self.after_connection_hook()
6091
def get_last_message(self, tablename):
    """Return the highest message sequence number of *tablename*'s mailbox.

    Selects the mailbox as a side effect; returns None when the server
    response cannot be parsed or the mailbox is unknown.
    """
    last_message = None
    # request mailbox list to the server
    # if needed
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    try:
        result = self.connection.select(self.connection.mailbox_names[tablename])
        last_message = int(result[1][0])
    except (IndexError, ValueError, TypeError, KeyError):
        e = sys.exc_info()[1]
        LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
    return last_message
6105
def get_uid_bounds(self, tablename):
    """Return (first_uid, last_uid) for *tablename*, or None when empty."""
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    # selecting the mailbox (via get_last_message) must happen before
    # asking the server for uids
    self.get_last_message(tablename)
    result, data = self.connection.uid("search", None, "(ALL)")
    uid_list = data[0].strip().split()
    if not uid_list:
        return None
    return (uid_list[0], uid_list[-1])
6118
def convert_date(self, date, add=None):
    """Convert a date object to a 'd-Mon-Y' IMAP date string, or the
    inverse: parse an RFC-style date header string into a datetime.

    add <timedelta> is added to the result (defaults to zero).
    Returns None for unparsable text or unsupported input types.

    Fixes: the docstring was previously buried after the first
    statements (making it a plain expression, not a docstring); the
    parse-error handler now uses the sys.exc_info() convention already
    used by get_last_message in this adapter instead of the
    Python-2-only 'except E, e' form.
    """
    if add is None:
        add = datetime.timedelta()
    months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
              "JUL", "AUG","SEP","OCT","NOV","DEC"]
    if isinstance(date, (datetime.datetime, datetime.date)):
        return (date + add).strftime("%d-%b-%Y")
    elif isinstance(date, basestring):
        # Prevent unexpected date response format
        try:
            dayname, datestring = date.split(",")
            date_list = datestring.strip().split()
            year = int(date_list[2])
            month = months.index(date_list[1].upper())
            day = int(date_list[0])
            hms = map(int, date_list[3].split(":"))
            return datetime.datetime(year, month, day,
                hms[0], hms[1], hms[2]) + add
        except (ValueError, AttributeError, IndexError):
            e = sys.exc_info()[1]
            LOGGER.error("Could not parse date text: %s. %s" %
                         (date, e))
            return None
    else:
        return None
6149 6150 @staticmethod
6151 - def header_represent(f, r):
6152 from email.header import decode_header 6153 text, encoding = decode_header(f)[0] 6154 if encoding: 6155 text = text.decode(encoding).encode('utf-8') 6156 return text
6157
def encode_text(self, text, charset, errors="replace"):
    """Normalize mail text to a utf-8 encoded string.

    None becomes the empty string; str input is decoded from *charset*
    (utf-8 when charset is None); anything else raises.
    """
    if text is None:
        text = ""
    elif isinstance(text, str):
        if charset is None:
            text = unicode(text, "utf-8", errors)
        else:
            text = unicode(text, charset, errors)
    else:
        raise Exception("Unsupported mail text type %s" % type(text))
    return text.encode("utf-8")
6171
def get_charset(self, message):
    """Return the declared content charset of *message* (None if absent)."""
    return message.get_content_charset()
6175
def get_mailboxes(self):
    """Query the mail server for mailbox names.

    Fills connection.mailbox_names ({sanitized table name: native
    mailbox name}) and returns the list of sanitized names.
    """
    if self.static_names:
        # statically defined mailbox names
        self.connection.mailbox_names = self.static_names
        return self.static_names.keys()

    mailboxes_list = self.connection.list()
    self.connection.mailbox_names = dict()
    mailboxes = list()
    x = 0
    for item in mailboxes_list[1]:
        x = x + 1
        item = item.strip()
        if not "NOSELECT" in item.upper():
            sub_items = item.split("\"")
            sub_items = [sub_item for sub_item in sub_items \
                if len(sub_item.strip()) > 0]
            # mailbox = sub_items[len(sub_items) -1]
            mailbox = sub_items[-1]
            # remove unwanted characters and store original names
            # Don't allow leading non alphabetic characters
            mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
            mailboxes.append(mailbox_name)
            self.connection.mailbox_names[mailbox_name] = mailbox

    return mailboxes
6203
def get_query_mailbox(self, query):
    """Walk a query tree and return the tablename of the first Field found.

    Returns None when the tree contains no Field reference.
    """
    node = query
    while hasattr(node, "first"):
        node = node.first
        if isinstance(node, Field):
            return node.tablename
        if not isinstance(node, Query):
            return None
    return None
6220
def is_flag(self, flag):
    """True when *flag* maps to one of the IMAP system flag names."""
    return self.search_fields.get(flag, None) in self.flags.values()
6226
def define_tables(self, mailbox_names=None):
    """
    Auto create common IMAP fields.

    This function creates field definitions "statically",
    meaning that custom fields as in other adapters should
    not be supported and definitions handled on a service/mode
    basis (local syntax for Gmail(r), Ymail(r)).

    Returns a dictionary with tablename, server native mailbox name
    pairs.
    """
    if mailbox_names:
        # optional statically declared mailboxes
        self.static_names = mailbox_names
    else:
        self.static_names = None
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()

    names = self.connection.mailbox_names.keys()

    for name in names:
        self.db.define_table("%s" % name,
            Field("uid", "string", writable=False),
            Field("answered", "boolean"),
            Field("created", "datetime", writable=False),
            Field("content", list, writable=False),
            Field("to", "string", writable=False),
            Field("cc", "string", writable=False),
            Field("bcc", "string", writable=False),
            Field("size", "integer", writable=False),
            Field("deleted", "boolean"),
            Field("draft", "boolean"),
            Field("flagged", "boolean"),
            Field("sender", "string", writable=False),
            Field("recent", "boolean", writable=False),
            Field("seen", "boolean"),
            Field("subject", "string", writable=False),
            Field("mime", "string", writable=False),
            Field("email", "string", writable=False, readable=False),
            Field("attachments", list, writable=False, readable=False),
            Field("encoding", writable=False)
            )

        # Set a special _mailbox attribute for storing
        # native mailbox names
        self.db[name].mailbox = \
            self.connection.mailbox_names[name]

        # decode quoted printable
        self.db[name].to.represent = self.db[name].cc.represent = \
            self.db[name].bcc.represent = self.db[name].sender.represent = \
            self.db[name].subject.represent = self.header_represent

    # Set the db instance mailbox collections
    self.db.mailboxes = self.connection.mailbox_names
    return self.db.mailboxes
6285
def create_table(self, *args, **kwargs):
    """No-op: IMAP mailboxes are mapped, not created; this stub exists
    only because the DAL adapter interface requires it."""
    pass
6290
def _select(self, query, fields, attributes):
    """Return the IMAP search string for *query*, applying common filters."""
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])
    return str(query)
6295
def select(self, query, fields, attributes):
    """ Search and Fetch records and return web2py rows.

    Translates *query* into an IMAP UID SEARCH, fetches each matching
    message (headers-only unless content/size/attachments/email fields
    are requested) and maps the results into row arrays for the parser.
    """
    # move this statement elsewhere (upper-level)
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])

    import email
    # get records from imap server with search + fetch
    # convert results to a dictionary
    tablename = None
    fetch_results = list()

    if isinstance(query, Query):
        tablename = self.get_table(query)
        mailbox = self.connection.mailbox_names.get(tablename, None)
        if mailbox is None:
            raise ValueError("Mailbox name not found: %s" % mailbox)
        else:
            # select with readonly
            result, selected = self.connection.select(mailbox, True)
            if result != "OK":
                raise Exception("IMAP error: %s" % selected)
            self.mailbox_size = int(selected[0])
            search_query = "(%s)" % str(query).strip()
            search_result = self.connection.uid("search", None, search_query)
            # Normal IMAP response OK is assumed (change this)
            if search_result[0] == "OK":
                # For "light" remote server responses just get the first
                # ten records (change for non-experimental implementation)
                # However, light responses are not guaranteed with this
                # approach, just fewer messages.
                limitby = attributes.get('limitby', None)
                messages_set = search_result[1][0].split()
                # descending order
                messages_set.reverse()
                if limitby is not None:
                    # TODO: orderby, asc/desc, limitby from complete message set
                    messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                # keep the requests small for header/flags
                if any([(field.name in ["content", "size",
                                        "attachments", "email"]) for
                       field in fields]):
                    imap_fields = "(RFC822 FLAGS)"
                else:
                    imap_fields = "(RFC822.HEADER FLAGS)"

                if len(messages_set) > 0:
                    # create fetch results object list
                    # fetch each remote message and store it in memory
                    # (change to multi-fetch command syntax for faster
                    # transactions)
                    for uid in messages_set:
                        # fetch the RFC822 message body
                        typ, data = self.connection.uid("fetch", uid, imap_fields)
                        if typ == "OK":
                            fr = {"message": int(data[0][0].split()[0]),
                                  "uid": long(uid),
                                  "email": email.message_from_string(data[0][1]),
                                  "raw_message": data[0][1]}
                            fr["multipart"] = fr["email"].is_multipart()
                            # fetch flags for the message
                            fr["flags"] = self.driver.ParseFlags(data[1])
                            fetch_results.append(fr)
                        else:
                            # error retrieving the message body
                            raise Exception("IMAP error retrieving the body: %s" % data)
            else:
                raise Exception("IMAP search error: %s" % search_result[1])
    elif isinstance(query, (Expression, basestring)):
        raise NotImplementedError()
    else:
        raise TypeError("Unexpected query type")

    imapqry_dict = {}
    imapfields_dict = {}

    if len(fields) == 1 and isinstance(fields[0], SQLALL):
        allfields = True
    elif len(fields) == 0:
        allfields = True
    else:
        allfields = False
    if allfields:
        colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
    else:
        colnames = ["%s.%s" % (tablename, field.name) for field in fields]

    for k in colnames:
        imapfields_dict[k] = k

    imapqry_list = list()
    imapqry_array = list()
    for fr in fetch_results:
        attachments = []
        content = []
        size = 0
        n = int(fr["message"])
        item_dict = dict()
        message = fr["email"]
        uid = fr["uid"]
        charset = self.get_charset(message)
        flags = fr["flags"]
        raw_message = fr["raw_message"]
        # Return messages data mapping static fields
        # and fetched results. Mapping should be made
        # outside the select function (with auxiliary
        # instance methods)

        # pending: search flags states trough the email message
        # instances for correct output

        # preserve subject encoding (ASCII/quoted printable)

        if "%s.id" % tablename in colnames:
            item_dict["%s.id" % tablename] = n
        if "%s.created" % tablename in colnames:
            item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
        if "%s.uid" % tablename in colnames:
            item_dict["%s.uid" % tablename] = uid
        if "%s.sender" % tablename in colnames:
            # If there is no encoding found in the message header
            # force utf-8 replacing characters (change this to
            # module's defaults). Applies to .sender, .to, .cc and .bcc fields
            item_dict["%s.sender" % tablename] = message["From"]
        if "%s.to" % tablename in colnames:
            item_dict["%s.to" % tablename] = message["To"]
        if "%s.cc" % tablename in colnames:
            if "Cc" in message.keys():
                item_dict["%s.cc" % tablename] = message["Cc"]
            else:
                item_dict["%s.cc" % tablename] = ""
        if "%s.bcc" % tablename in colnames:
            if "Bcc" in message.keys():
                item_dict["%s.bcc" % tablename] = message["Bcc"]
            else:
                item_dict["%s.bcc" % tablename] = ""
        if "%s.deleted" % tablename in colnames:
            item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
        if "%s.draft" % tablename in colnames:
            item_dict["%s.draft" % tablename] = "\\Draft" in flags
        if "%s.flagged" % tablename in colnames:
            item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
        if "%s.recent" % tablename in colnames:
            item_dict["%s.recent" % tablename] = "\\Recent" in flags
        if "%s.seen" % tablename in colnames:
            item_dict["%s.seen" % tablename] = "\\Seen" in flags
        if "%s.subject" % tablename in colnames:
            item_dict["%s.subject" % tablename] = message["Subject"]
        if "%s.answered" % tablename in colnames:
            item_dict["%s.answered" % tablename] = "\\Answered" in flags
        if "%s.mime" % tablename in colnames:
            item_dict["%s.mime" % tablename] = message.get_content_type()
        if "%s.encoding" % tablename in colnames:
            item_dict["%s.encoding" % tablename] = charset

        # Here goes the whole RFC822 body as an email instance
        # for controller side custom processing
        # The message is stored as a raw string
        # >> email.message_from_string(raw string)
        # returns a Message object for enhanced object processing
        if "%s.email" % tablename in colnames:
            # WARNING: no encoding performed (raw message)
            item_dict["%s.email" % tablename] = raw_message

        # Size measure as suggested in a Velocity Reviews post
        # by Tim Williams: "how to get size of email attachment"
        # Note: len() and server RFC822.SIZE reports doesn't match
        # To retrieve the server size for representation would add a new
        # fetch transaction to the process
        for part in message.walk():
            maintype = part.get_content_maintype()
            if ("%s.attachments" % tablename in colnames) or \
                    ("%s.content" % tablename in colnames):
                payload = part.get_payload(decode=True)
                if payload:
                    filename = part.get_filename()
                    values = {"mime": part.get_content_type()}
                    if ((filename or not "text" in maintype) and
                            ("%s.attachments" % tablename in colnames)):
                        values.update({"payload": payload,
                                       "filename": filename,
                                       "encoding": part.get_content_charset(),
                                       "disposition": part["Content-Disposition"]})
                        attachments.append(values)
                    elif (("text" in maintype) and
                          ("%s.content" % tablename in colnames)):
                        values.update({"text": self.encode_text(payload,
                                           self.get_charset(part))})
                        content.append(values)

            if "%s.size" % tablename in colnames:
                if part is not None:
                    size += len(str(part))
        item_dict["%s.content" % tablename] = content
        item_dict["%s.attachments" % tablename] = attachments
        item_dict["%s.size" % tablename] = size
        imapqry_list.append(item_dict)

    # extra object mapping for the sake of rows object
    # creation (sends an array or lists)
    for item_dict in imapqry_list:
        imapqry_array_item = list()
        for fieldname in colnames:
            imapqry_array_item.append(item_dict[fieldname])
        imapqry_array.append(imapqry_array_item)

    # parse result and return a rows object
    colnames = colnames
    processor = attributes.get('processor',self.parse)
    return processor(imapqry_array, fields, colnames)
6508
def _insert(self, table, fields):
    """
    Build the argument tuple for an IMAP APPEND from a DAL insert.

    :param table: DAL table mapped to a mailbox (``table.mailbox`` holds
        the IMAP mailbox name)
    :param fields: sequence of (Field, value) pairs to insert
    :returns: tuple ``(mailbox, flags, date_time, message)`` suitable for
        the connection's ``append()``; ``message`` is the RFC822 text
    :raises NotImplementedError: when no field values are given
    """

    def add_payload(message, obj):
        # Attach one MIME part described by dict `obj` (keys: encoding,
        # mime, text/payload, filename) to `message`.
        # NOTE(review): `Message` here resolves to the name imported
        # below inside the `if not message:` branch; add_payload is only
        # called after that import has executed.
        payload = Message()
        payload.set_charset(obj.get("encoding", "utf-8"))
        mime = obj.get("mime", None)
        if mime:
            payload.set_type(mime)
        if "text" in obj:
            payload.set_payload(obj["text"])
        elif "payload" in obj:
            payload.set_payload(obj["payload"])
        if "filename" in obj and obj["filename"]:
            payload.add_header("Content-Disposition",
                               "attachment", filename=obj["filename"])
        message.attach(payload)

    mailbox = table.mailbox
    d = dict(((k.name, v) for k, v in fields))
    # default the message internal date to "now" when no created value given
    date_time = (d.get("created", datetime.datetime.now())).timetuple()
    if len(d) > 0:
        message = d.get("email", None)
        attachments = d.get("attachments", [])
        content = d.get("content", [])
        # collect the standard IMAP flags (\Answered, \Deleted, ...) from
        # the boolean flag fields that are set
        flags = " ".join(["\\%s" % flag.capitalize() for flag in
                          ("answered", "deleted", "draft", "flagged",
                           "recent", "seen") if d.get(flag, False)])
        if not message:
            # no raw email supplied: build one from the individual fields
            from email.message import Message
            mime = d.get("mime", None)
            charset = d.get("encoding", None)
            message = Message()
            message["from"] = d.get("sender", "")
            message["subject"] = d.get("subject", "")
            if mime:
                message.set_type(mime)
            if charset:
                message.set_charset(charset)
            for item in ("to", "cc", "bcc"):
                value = d.get(item, "")
                if isinstance(value, basestring):
                    message[item] = value
                else:
                    # sequences of addresses are joined with ";"
                    message[item] = ";".join([i for i in
                                              value])
            if (not message.is_multipart() and
               (not message.get_content_type().startswith(
                    "multipart"))):
                # single-part message: the body is the (first) content item
                if isinstance(content, basestring):
                    message.set_payload(content)
                elif len(content) > 0:
                    message.set_payload(content[0]["text"])
            else:
                # multipart: attach every content part and attachment
                [add_payload(message, c) for c in content]
                [add_payload(message, a) for a in attachments]
            message = message.as_string()
        return (mailbox, flags, date_time, message)
    else:
        raise NotImplementedError("IMAP empty insert is not implemented")
def insert(self, table, fields):
    """Append a new message to the table's mailbox and return the new
    record id (looked up through the server-assigned UID)."""
    args = self._insert(table, fields)
    status, data = self.connection.append(*args)
    if status != "OK":
        raise Exception("IMAP message append failed: %s" % data)
    # the server reply carries the UID assigned to the appended message;
    # take the last numeric token of its textual form
    uid = int(re.findall("\d+", str(data))[-1])
    return self.db(table.uid == uid).select(table.id).first().id
6576
def _update(self, tablename, query, fields, commit=False):
    """
    Build the list of IMAP STORE commands implementing a DAL update.

    Only flag fields can be updated here, and never ``\\Recent`` (which
    is controlled by the server).

    :param tablename: name of the DAL table / mailbox
    :param query: DAL query selecting the messages to update
    :param fields: sequence of (Field, value) pairs
    :param commit: unused; kept for interface compatibility
    :returns: list of ``(message_number, "+FLAGS"/"-FLAGS", "(<flags>)")``
        tuples to be passed to ``connection.store()``
    """
    # TODO: the adapter should implement an .expand method
    commands = list()
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    mark = []
    unmark = []
    if query:
        # split the requested flag changes into set / unset groups
        for item in fields:
            field = item[0]
            name = field.name
            value = item[1]
            if self.is_flag(name):
                flag = self.search_fields[name]
                if (value is not None) and (flag != "\\Recent"):
                    if value:
                        mark.append(flag)
                    else:
                        unmark.append(flag)
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        # keep only numeric message sequence numbers from the reply
        store_list = [item.strip() for item in data[0].split()
                      if item.strip().isdigit()]
        # build commands for marked flags
        for number in store_list:
            result = None
            if len(mark) > 0:
                commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
            if len(unmark) > 0:
                commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
    return commands
6610
def update(self, tablename, query, fields):
    """Execute the STORE commands produced by _update(); return the
    number of successfully stored flag changes."""
    modified = 0
    for command in self._update(tablename, query, fields):
        status, data = self.connection.store(*command)
        if status != "OK":
            raise Exception("IMAP storing error: %s" % data)
        modified += 1
    return modified
6621
def _count(self, query, distinct=None):
    # Not implemented for IMAP: counting is done directly by count(),
    # which issues a server-side SEARCH instead of building a command.
    raise NotImplementedError()
6624
def count(self, query, distinct=None):
    """Count the messages matching `query` in its mailbox by issuing a
    server-side SEARCH; returns 0 when there is no query/mailbox."""
    tablename = self.get_query_mailbox(query)
    if not query or tablename is None:
        return 0
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    result, data = self.connection.select(
        self.connection.mailbox_names[tablename])
    result, data = self.connection.search(None, "(%s)" % query)
    # only numeric tokens in the reply are message sequence numbers
    matching = [token.strip() for token in data[0].split()
                if token.strip().isdigit()]
    return len(matching)
6637
def delete(self, tablename, query):
    """Mark the messages matching `query` as \\Deleted and expunge the
    mailbox; returns the number of messages flagged."""
    removed = 0
    if not query:
        return removed
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    result, data = self.connection.select(
        self.connection.mailbox_names[tablename])
    result, data = self.connection.search(None, "(%s)" % query)
    numbers = [token.strip() for token in data[0].split()
               if token.strip().isdigit()]
    for number in numbers:
        result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
        if result != "OK":
            raise Exception("IMAP store error: %s" % data)
        removed += 1
    if removed > 0:
        # physically remove the flagged messages
        result, data = self.connection.expunge()
    return removed
6656
def BELONGS(self, first, second):
    """IMAP criterion for `field.belongs(values)`: a comma-separated set
    of message sequence numbers or UIDs (non-numeric values dropped)."""
    key = self.search_fields[first.name]
    if key == "MESSAGE":
        digits = [str(item) for item in second if str(item).isdigit()]
        return ",".join(digits).strip()
    if key == "UID":
        digits = [str(item) for item in second if str(item).isdigit()]
        return "UID %s" % ",".join(digits).strip()
    raise Exception("Operation not supported")
    # result = "(%s %s)" % (self.expand(first), self.expand(second))
6672
def CONTAINS(self, first, second, case_sensitive=False):
    """IMAP substring-search criterion for `field.contains(value)`.

    The `case_sensitive` flag is silently ignored (per the original
    implementation's note: only one sensitivity is available)."""
    key = self.search_fields[first.name]
    if key in ("FROM", "TO", "SUBJECT", "TEXT"):
        return "%s \"%s\"" % (key, self.expand(second))
    if first.name in ("cc", "bcc"):
        return "%s \"%s\"" % (first.name.upper(), self.expand(second))
    if first.name == "mime":
        return "HEADER Content-Type \"%s\"" % self.expand(second)
    raise Exception("Operation not supported")
6688
def GT(self, first, second):
    """Translate `field > value` into an IMAP search criterion."""
    key = self.search_fields[first.name]
    if key == "MESSAGE":
        last = self.get_last_message(first.tablename)
        return "%d:%d" % (int(self.expand(second)) + 1, last)
    if key == "UID":
        # GT and LT may not return expected sets depending on
        # the uid format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            exc = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(exc))
            return ""
        try:
            lower_limit = int(self.expand(second)) + 1
        except (ValueError, TypeError):
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (lower_limit, threshold)
    if key == "DATE":
        return "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
    if key == "SIZE":
        return "LARGER %s" % self.expand(second)
    raise Exception("Operation not supported")
6718
def GE(self, first, second):
    """Translate `field >= value` into an IMAP search criterion."""
    key = self.search_fields[first.name]
    if key == "MESSAGE":
        last = self.get_last_message(first.tablename)
        return "%s:%s" % (self.expand(second), last)
    if key == "UID":
        # GT and LT may not return expected sets depending on
        # the uid format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            exc = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(exc))
            return ""
        return "UID %s:%s" % (self.expand(second), threshold)
    if key == "DATE":
        return "SINCE %s" % self.convert_date(second)
    raise Exception("Operation not supported")
6742
def LT(self, first, second):
    """Translate `field < value` into an IMAP search criterion."""
    key = self.search_fields[first.name]
    if key == "MESSAGE":
        return "%s:%s" % (1, int(self.expand(second)) - 1)
    if key == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            exc = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(exc))
            return ""
        try:
            upper_limit = int(self.expand(second)) - 1
        except (ValueError, TypeError):
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (pedestal, upper_limit)
    if key == "DATE":
        return "BEFORE %s" % self.convert_date(second)
    if key == "SIZE":
        return "SMALLER %s" % self.expand(second)
    raise Exception("Operation not supported")
6768
def LE(self, first, second):
    """
    Translate `field <= value` into an IMAP search criterion.

    :param first: field being compared (message id, uid or date)
    :param second: the upper bound value
    :returns: IMAP SEARCH criterion string ("" when uid bounds cannot
        be determined)
    :raises Exception: for unsupported fields or a non-integer UID value
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        result = "%s:%s" % (1, self.expand(second))
    elif name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        # guard the int() conversion for consistency with LT/GT, which
        # raise a descriptive error instead of an unhandled ValueError
        try:
            upper_limit = int(self.expand(second))
        except (ValueError, TypeError):
            raise Exception("Operation not supported (non integer UID)")
        result = "UID %s:%s" % (pedestal, upper_limit)
    elif name == "DATE":
        result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
    else:
        raise Exception("Operation not supported")
    return result
6788
def NE(self, first, second=None):
    """IMAP criterion for `field != value`; `id != None` is treated as
    an all-records query."""
    if (second is None) and isinstance(first, Field):
        # All records special table query
        if first.type == "id":
            return self.GE(first, 1)
    negated = self.NOT(self.EQ(first, second))
    # collapse double negation produced by NOT(NOT ...)
    return negated.replace("NOT NOT", "").strip()
6797
def EQ(self, first, second):
    """IMAP criterion for `field == value` (sequence number, UID, date
    or a flag keyword such as SEEN / NOT SEEN)."""
    key = self.search_fields[first.name]
    if key is None:
        raise Exception("Operation not supported")
    if key == "MESSAGE":
        # query by message sequence number
        return "%s" % self.expand(second)
    if key == "UID":
        return "UID %s" % self.expand(second)
    if key == "DATE":
        return "ON %s" % self.convert_date(second)
    if key in self.flags.values():
        # flag keyword: "\\Seen" -> SEEN (truthy) or NOT SEEN (falsy)
        if second:
            return "%s" % (key.upper()[1:])
        return "NOT %s" % (key.upper()[1:])
    raise Exception("Operation not supported")
6820
def AND(self, first, second):
    """Conjunction: IMAP SEARCH combines juxtaposed criteria with AND."""
    return "%s %s" % (self.expand(first), self.expand(second))
6824
def OR(self, first, second):
    """Disjunction via the IMAP OR prefix operator; the accidental
    "OR OR" produced by nested expressions is collapsed."""
    combined = "OR %s %s" % (self.expand(first), self.expand(second))
    return combined.replace("OR OR", "OR")
6828
def NOT(self, first):
    """Negation via the IMAP NOT prefix operator."""
    return "NOT %s" % self.expand(first)
########################################################################
# end of adapters
########################################################################

# Maps the scheme prefix of a DAL connection uri (e.g. 'sqlite' in
# 'sqlite://storage.sqlite') to the adapter class that implements it.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    :param field: a DAL Field; its type/length/notnull/unique attributes
        drive which validators are attached
    :returns: a validator, or a list of validators (possibly empty)
    """
    db = field.db
    try:
        from gluon import validators
    except ImportError:
        # outside web2py there are no validators to attach
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # helper used by represent functions: format a referenced row with the
    # referenced table's _format (string or callable), else return the id
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # simple (non-dotted) reference to a defined table
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # self-reference must allow empty to break the cycle
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items; query in chunks and
                # merge the partial result sets
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(f(r,x.id) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        if not field.notnull:
            requires = validators.IS_EMPTY_OR(requires)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff: type prefixes (integer, double, date, time, datetime, boolean)
    # that have their own empty handling
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' characters in the string form of `item` by doubling them."""
    text = str(item)
    return text.replace('|', '||')
6979
def bar_encode(items):
    """Serialize `items` as '|a|b|...|' with '|' escaped; blank items are
    dropped."""
    encoded = '|'.join(bar_escape(item) for item in items if str(item).strip())
    return '|%s|' % encoded
6982
def bar_decode_integer(value):
    """Decode a bar-encoded string (or file-like object) into a list of longs."""
    # accept file-like inputs that expose read() instead of split()
    if not hasattr(value, 'split') and hasattr(value, 'read'):
        value = value.read()
    return [long(piece) for piece in value.split('|') if piece.strip()]
6987
def bar_decode_string(value):
    """Decode a bar-encoded string into a list, un-escaping doubled '|'."""
    inner = value[1:-1]  # strip the leading and trailing '|'
    return [chunk.replace('||', '|')
            for chunk in REGEX_UNPACK.split(inner) if chunk.strip()]
6991
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        # Lookup order: _extra (computed columns), then "table.field"
        # dotted keys, then plain attributes, finally lazy references.
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # fall back to the bare field name
                key = m.group(2)
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError), ae:
            try:
                # resolve (and cache) a lazy reference, if defined
                self[key] = ogetattr(self,'__get_lazy_reference__')(key)
                return self[key]
            except:
                raise ae

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    # NOTE(review): this lambda is shadowed by the def __copy__ below
    __copy__ = lambda self: Row(self)

    __call__ = __getitem__


    def get(self, key, default=None):
        # dict-like get() honouring the same lookup order as __getitem__
        try:
            return self.__getitem__(key)
        except(KeyError, AttributeError, TypeError):
            return self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    __getattr__ = __getitem__

    def __eq__(self,other):
        # Rows compare equal when their serializable dict forms match
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """
        Return a plain dict copy of the row containing only serializable
        values (nested Rows become dicts, References become longs,
        Decimals become floats); non-serializable values are dropped.
        """
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # ISO format, 'T' replaced by space, seconds precision
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row (recursively) to an XML fragment."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                # non-identifier field names go into <extra name="..."> tags
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """

        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
7154
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list that can also be called, returning a shallow copy of itself.

    Used for DAL._tables; presumably kept so legacy code may call
    db.tables() as well as read db.tables -- TODO confirm against callers.
    """
    def __call__(self):
        # copy.copy preserves the SQLCallableList type (slicing would
        # return a plain list)
        return copy.copy(self)
7164
def smart_query(fields,text):
    """
    Parse a textual search expression into a DAL query over `fields`.

    :param fields: a Field, Table, or list of Fields/Tables whose names
        may appear in the expression
    :param text: expression such as "name = 'John' and age > 18"; symbolic
        and English operators are both accepted
    :returns: the resulting DAL query (None for empty input)
    :raises RuntimeError: on unknown field names, bad syntax, or an
        operation unsupported for the field's type
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    # flatten Tables into their fields
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both "fieldname" and "table.fieldname" (lowercased) to the field
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # extract quoted string constants, replacing them with #<i> placeholders
    # so operator rewriting below cannot alter their contents
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize symbolic and English operators to a canonical token set
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # rejoin two-character operators split by the substitutions ("< =" -> "<=")
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine: expect field, then operator, then value
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore a previously extracted string constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # unquoted equality on text fields becomes a LIKE
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset the state machine for the next clause
            field = op = neg = logic = None
    return query
7282
7283 -class DAL(object):
7284 7285 """ 7286 an instance of this class represents a database connection 7287 7288 Example:: 7289 7290 db = DAL('sqlite://test.db') 7291 7292 or 7293 7294 db = DAL(**{"uri": ..., "tables": [...]...}) # experimental 7295 7296 db.define_table('tablename', Field('fieldname1'), 7297 Field('fieldname2')) 7298 """ 7299
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
    """
    Return a (possibly shared) DAL instance for the current thread.

    Instances are cached per-thread keyed on db_uid (md5 of the uri
    unless explicitly supplied).  The special uri '<zombie>' retrieves
    an existing instance by db_uid without opening a connection.
    """
    if not hasattr(THREAD_LOCAL,'db_instances'):
        THREAD_LOCAL.db_instances = {}
    if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
        THREAD_LOCAL.db_instances_zombie = {}
    if uri == '<zombie>':
        db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
        if db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[db_uid]
            # reuse the most recently created instance of the group
            db = db_group[-1]
        elif db_uid in THREAD_LOCAL.db_instances_zombie:
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
            THREAD_LOCAL.db_instances_zombie[db_uid] = db
    else:
        db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
        if db_uid in THREAD_LOCAL.db_instances_zombie:
            # promote an existing zombie placeholder to a real instance
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
            del THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
        db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
        db_group.append(db)
        THREAD_LOCAL.db_instances[db_uid] = db_group
    db._db_uid = db_uid
    return db
@staticmethod
def set_folder(folder):
    """
    # ## this allows gluon to set a folder for this thread
    # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
    """
    # delegate to the adapter-level (thread local) folder setting
    BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
    """
    Returns a dictionary with uri as key with timings and defined tables
    {'sqlite://storage.sqlite': {
        'dbstats': [(select auth_user.email from auth_user, 0.02009)],
        'dbtables': {
            'defined': ['auth_cas', 'auth_event', 'auth_group',
                'auth_membership', 'auth_permission', 'auth_user'],
            'lazy': '[]'
            }
        }
    }
    """
    dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
    infos = {}
    for db_uid, db_group in dbs:
        for db in db_group:
            if not db._uri:
                continue
            # never expose credentials in the reported uri
            k = hide_password(db._uri)
            infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                            dbtables = {'defined':
                                sorted(list(set(db.tables) -
                                            set(db._LAZY_TABLES.keys()))),
                                'lazy': sorted(db._LAZY_TABLES.keys())}
                            )
    return infos
@staticmethod
def distributed_transaction_begin(*instances):
    """
    Begin a distributed (two-phase) transaction over several DAL instances.

    :param instances: DAL instances whose adapters must all support
        distributed transactions
    :raises SyntaxError: if any adapter lacks distributed transaction
        support
    """
    if not instances:
        return
    # materialize the enumeration so it can be iterated more than once;
    # the previous code built `keys` by unpacking (i, db) pairs from the
    # raw instances tuple *before* enumerate was applied, which raised
    # TypeError on any real call
    instances = list(enumerate(instances))
    thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._adapter.support_distributed_transaction():
            raise SyntaxError(
                'distributed transaction not supported by %s' % db._dbname)
    for (i, db) in instances:
        db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
    """
    Two-phase commit over several DAL instances: prepare all, then commit
    all; on any prepare failure roll back every prepared transaction.

    :param instances: DAL instances participating in the transaction
    :raises SyntaxError: if any adapter lacks distributed transaction
        support
    :raises RuntimeError: when a prepare step fails (after rolling back)
    """
    if not instances:
        return
    # materialize the enumeration: the previous code assigned
    # `instances = enumerate(instances)` and then exhausted the iterator
    # while building `keys`, so every following loop was a silent no-op
    instances = list(enumerate(instances))
    thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._adapter.support_distributed_transaction():
            # previous code referenced the misspelled db._dbanme here,
            # which raised AttributeError instead of the intended error
            raise SyntaxError(
                'distributed transaction not supported by %s' % db._dbname)
    try:
        for (i, db) in instances:
            db._adapter.prepare(keys[i])
    except:
        # best-effort rollback of everything already prepared
        for (i, db) in instances:
            db._adapter.rollback_prepared(keys[i])
        raise RuntimeError('failure to commit distributed transaction')
    else:
        for (i, db) in instances:
            db._adapter.commit_prepared(keys[i])
    return
7401
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False, debug=False, lazy_tables=False,
                 db_uid=None, do_connect=True,
                 after_connection=None, tables=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
           (default: 'sqlite://dummy.db')

            experimental: you can specify a dictionary as uri
            parameter i.e. with
            db = DAL({"uri": "sqlite://storage.sqlite",
                      "tables": {...}, ...})

            for an example of dict input you can check the output
            of the scaffolding db model with

            db.as_dict()

            Note that for compatibility with Python older than
            version 2.6.5 you should cast your dict input keys
            to str due to a syntax limitation on kwarg names.
            for proper DAL dictionary input you can use one of:

            obj = serializers.cast_keys(dict, [encoding="utf-8"])

            or else (for parsing json input)

            obj = serializers.loads_json(data, unicode_keys=False)

        :pool_size: How many open connections to make to the database object.
        :folder: where .table files will be created.
            automatically set within web2py
            use an explicit path when using DAL outside web2py
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
            against sql/nosql reserved keywords. (Default None)

            * 'common' List of sql keywords that are common to all database types
              such as "SELECT, INSERT". (recommended)
            * 'all' Checks against all known SQL keywords. (not recommended)
              <adaptername> Checks against the specific adapters list of keywords
              (recommended)
            * '<adaptername>_nonreserved' Checks against the specific adapters
              list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, import automatically table definitions from the
            databases folder
        :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
        :lazy_tables (defaults to False): delay table definition until table access
        :after_connection (defaults to None): a callable that will be executed after the connection
        """
        # '<zombie>' is a placeholder used by the unpickler: no real setup
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            # credentials in the uri may be url-encoded
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []              # (sql, seconds) pairs, see get_instances
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}          # tablename -> (tablename, fields, args)
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._do_connect = do_connect

        # sanitize attempts (note: isdigit() is False for negative strings)
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # a list/tuple of uris means "try each in turn" (failover)
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        # the uri scheme selects the adapter class
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        self._adapter.build_parsemap()
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # bad uri / unknown adapter: retrying will not help
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    # wait before the next round of attempts
                    time.sleep(1)
            if not connected:
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy adapter, no real connection, migrations disabled
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection)
            migrate = fake_migrate = False
        adapter = self._adapter
        # hash of the uri, used to namespace the .table migration files
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or tables:
            self.import_table_definitions(adapter.folder,
                                          tables=tables)
    @property
    def tables(self):
        # read-only view of the defined table names (an SQLCallableList)
        return self._tables
7566
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, tables=None):
        """
        Define tables from previously saved definitions.

        If ``tables`` is given it must be a sequence of dicts of
        define_table keyword arguments; otherwise the pickled
        ``<uri_hash>_*.table`` migration files found under ``path``
        are loaded and turned into table definitions.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # slice the tablename out of '<uri_hash>_<name>.table'
                    name = filename[len(pattern)-7:-6]
                    # rebuild Field objects, keyed by their saved sort order
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    # restore original field order before defining the table
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7592
7593 - def check_reserved_keyword(self, name):
7594 """ 7595 Validates ``name`` against SQL keywords 7596 Uses self.check_reserve which is a list of 7597 operators to use. 7598 self.check_reserved 7599 ['common', 'postgres', 'mysql'] 7600 self.check_reserved 7601 ['all'] 7602 """ 7603 for backend in self.check_reserved: 7604 if name.upper() in self.RSK[backend]: 7605 raise SyntaxError( 7606 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7607
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Maps a RESTful request (path ``args`` + query ``vars``) onto the
        database according to a list of URL ``patterns`` and returns a Row
        with keys: status, response, pattern, error (and count on success).

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN    # matches '{table.field[.op[.not]]}' tags
        re2 = REGEX_SQUARE_BRACKETS   # matches 'label[table[.field]]' tags

        def auto_table(table,base='',depth=0):
            # generate patterns for every readable field of `table`,
            # recursing into referencing tables up to `depth` levels
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields are exposed as half-open ranges [ge, lt)
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    # dates are exposed by progressive year/month/day segments
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if depth>0:
                    for f in db[table]._referenced_by:
                        tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                        patterns.append(tag)
                        patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns automatically for all non-auth tables
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[table]' placeholder in-place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # special request: just list the known patterns
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            if len(tags)!=len(args):
                continue
            # walk pattern tags and request args in lockstep,
            # narrowing `dbset` at each step
            for tag in tags:
                if re1.match(tag):
                    # print 're1:'+tag
                    # '{table.field[.op[.not]]}' : filter on a field value
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # optional 4th token negates the query
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'label[table[.field]]' : hop to a (possibly related) table
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            # fallback: materialize ids instead of a subselect
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # print 're3:'+tag
                    # ':field' : return the values of a single column
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal path segment did not match: try next pattern
                    break
                otable = table
                i += 1
                if i==len(tags) and table:
                    # consumed the whole pattern: select and return records
                    ofields = vars.get('order',db[table]._id.name).split('|')
                    try:
                        orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                    except (KeyError, AttributeError):
                        return Row({'status':400,'error':'invalid orderby','response':None})
                    if exposedfields:
                        fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                    else:
                        fields = [field for field in db[table] if field.readable]
                    count = dbset.count()
                    try:
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    if count > limits[1]-limits[0]:
                        return Row({'status':400,'error':'too many records','response':None})
                    try:
                        response = dbset.select(limitby=limits,orderby=orderby,*fields)
                    except ValueError:
                        return Row({'status':400,'pattern':pattern,
                                    'error':'invalid path','response':None})
                    return Row({'status':200,'response':response,
                                'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7861
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Define (or lazily register) a table on this DAL and return the
        Table object (or None when the definition is deferred by
        lazy_tables).

        :param tablename: the table name (str; unicode is coerced)
        :param fields: Field instances (may also be passed via
            ``args['fields']``)
        :param args: table attributes (migrate, redefine, format, ...)
        :raises SyntaxError: on an invalid/duplicate name or invalid
            table attributes
        """
        if not fields and 'fields' in args:
            fields = args.get('fields',())
        if not isinstance(tablename, str):
            if isinstance(tablename, unicode):
                try:
                    tablename = str(tablename)
                except UnicodeEncodeError:
                    raise SyntaxError("invalid unicode table name")
            else:
                raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            # the name is taken: only allowed when redefine=True
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE: attribute validation only runs when none of the
            # previous branches applied (original elif-chain behavior)
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer actual definition until first access (see __getattr__)
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7899
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Actually build the Table object (used directly by define_table,
        or later by __getattr__ when lazy_tables is enabled), wire up
        references and validators, and run the migration if needed.
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            # append DAL-wide common fields to every table
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        # NOTE: due to and/or precedence this reads as
        # (migrate and uri is usable) or (engine is google:datastore)
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize migrations across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
7940
7941 - def as_dict(self, flat=False, sanitize=True):
7942 db_uid = uri = None 7943 if not sanitize: 7944 uri, db_uid = (self._uri, self._db_uid) 7945 db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid, 7946 **dict([(k, getattr(self, "_" + k, None)) 7947 for k in 'pool_size','folder','db_codec', 7948 'check_reserved','migrate','fake_migrate', 7949 'migrate_enabled','fake_migrate_all', 7950 'decode_credentials','driver_args', 7951 'adapter_args', 'attempts', 7952 'bigint_id','debug','lazy_tables', 7953 'do_connect'])) 7954 for table in self: 7955 db_as_dict["tables"].append(table.as_dict(flat=flat, 7956 sanitize=sanitize)) 7957 return db_as_dict
7958
7959 - def as_xml(self, sanitize=True):
7960 if not have_serializers: 7961 raise ImportError("No xml serializers available") 7962 d = self.as_dict(flat=True, sanitize=sanitize) 7963 return serializers.xml(d)
7964
7965 - def as_json(self, sanitize=True):
7966 if not have_serializers: 7967 raise ImportError("No json serializers available") 7968 d = self.as_dict(flat=True, sanitize=sanitize) 7969 return serializers.json(d)
7970
7971 - def as_yaml(self, sanitize=True):
7972 if not have_serializers: 7973 raise ImportError("No YAML serializers available") 7974 d = self.as_dict(flat=True, sanitize=sanitize) 7975 return serializers.yaml(d)
7976
7977 - def __contains__(self, tablename):
7978 try: 7979 return tablename in self.tables 7980 except AttributeError: 7981 # The instance has no .tables attribute yet 7982 return False
7983 7984 has_key = __contains__ 7985
7986 - def get(self,key,default=None):
7987 return self.__dict__.get(key,default)
7988
7989 - def __iter__(self):
7990 for tablename in self.tables: 7991 yield self[tablename]
7992
7993 - def __getitem__(self, key):
7994 return self.__getattr__(str(key))
7995
    def __getattr__(self, key):
        # if the table definition was deferred (lazy_tables), build it now
        # on first access; ogetattr avoids re-entering this __getattr__
        if ogetattr(self,'_lazy_tables') and \
                key in ogetattr(self,'_LAZY_TABLES'):
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename,*fields,**args)
        return ogetattr(self, key)
8002
8003 - def __setitem__(self, key, value):
8004 osetattr(self, str(key), value)
8005
8006 - def __setattr__(self, key, value):
8007 if key[:1]!='_' and key in self: 8008 raise SyntaxError( 8009 'Object %s exists and cannot be redefined' % key) 8010 osetattr(self,key,value)
8011 8012 __delitem__ = object.__delattr__ 8013
8014 - def __repr__(self):
8015 if hasattr(self,'_uri'): 8016 return '<DAL uri="%s">' % hide_password(str(self._uri)) 8017 else: 8018 return '<DAL db_uid="%s">' % self._db_uid
8019
8020 - def smart_query(self,fields,text):
8021 return Set(self, smart_query(fields,text))
8022
    def __call__(self, query=None, ignore_common_filters=None):
        # db(query) builds a Set; query may be a Query, a Table (all
        # records with a valid id), a Field (field is not null) or a
        # serialized dict
        if isinstance(query,Table):
            query = self._adapter.id_query(query)
        elif isinstance(query,Field):
            query = query!=None
        elif isinstance(query, dict):
            # a dict query may carry its own ignore_common_filters flag
            icf = query.get("ignore_common_filters")
            if icf: ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
8032
8033 - def commit(self):
8034 self._adapter.commit()
8035
8036 - def rollback(self):
8037 self._adapter.rollback()
8038
8039 - def close(self):
8040 self._adapter.close() 8041 if self._db_uid in THREAD_LOCAL.db_instances: 8042 db_group = THREAD_LOCAL.db_instances[self._db_uid] 8043 db_group.remove(self) 8044 if not db_group: 8045 del THREAD_LOCAL.db_instances[self._db_uid]
8046
    def executesql(self, query, placeholders=None, as_dict=False,
                   fields=None, colnames=None):
        """
        Executes an arbitrary query string against the database.

        placeholders is optional and will always be None.
        If using raw SQL with placeholders, placeholders may be
        a sequence of values to be substituted in
        or, (if supported by the DB driver), a dictionary with keys
        matching named placeholders in your SQL.

        Added 2009-12-05 "as_dict" optional argument. Will always be
        None when using DAL. If using raw SQL can be set to True
        and the results cursor returned by the DB driver will be
        converted to a sequence of dictionaries keyed with the db
        field names. Tested with SQLite but should work with any database
        since the cursor.description used to get field names is part of the
        Python dbi 2.0 specs. Results returned with as_dict=True are
        the same as those returned when applying .to_list() to a DAL query.

        [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]

        Added 2012-08-24 "fields" and "colnames" optional arguments. If either
        is provided, the results cursor returned by the DB driver will be
        converted to a DAL Rows object using the db._adapter.parse() method.

        The "fields" argument is a list of DAL Field objects that match the
        fields returned from the DB. The Field objects should be part of one or
        more Table objects defined on the DAL object. The "fields" list can
        include one or more DAL Table objects in addition to or instead of
        including Field objects, or it can be just a single table (not in a
        list). In that case, the Field objects will be extracted from the
        table(s).

        Instead of specifying the "fields" argument, the "colnames" argument
        can be specified as a list of field names in tablename.fieldname format.
        Again, these should represent tables and fields defined on the DAL
        object.

        It is also possible to specify both "fields" and the associated
        "colnames". In that case, "fields" can also include DAL Expression
        objects in addition to Field objects. For Field objects in "fields",
        the associated "colnames" must still be in tablename.fieldname format.
        For Expression objects in "fields", the associated "colnames" can
        be any arbitrary labels.

        Note, the DAL Table objects referred to by "fields" or "colnames" can
        be dummy tables and do not have to represent any real tables in the
        database. Also, note that the "fields" and "colnames" must be in the
        same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict:
            if not hasattr(adapter.cursor,'description'):
                raise RuntimeError("database does not support executesql(...,as_dict=True)")
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = [f[0] for f in columns]
            # will hold our finished resultset in a list
            data = adapter._fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            return [dict(zip(fields,row)) for row in data]
        try:
            data = adapter._fetchall()
        # NOTE(review): bare except — deliberately returns None for
        # statements with no result set, but it also hides driver errors
        except:
            return None
        if fields or colnames:
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            # flatten any Table objects into their Field lists
            extracted_fields = []
            for field in fields:
                if isinstance(field, Table):
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = ['%s.%s' % (f.tablename, f.name)
                            for f in extracted_fields]
            data = adapter.parse(
                data, fields=extracted_fields, colnames=colnames)
        return data
8135
8136 - def _remove_references_to(self, thistable):
8137 for table in self: 8138 table._referenced_by = [field for field in table._referenced_by 8139 if not field.table==thistable]
8140
8141 - def export_to_csv_file(self, ofile, *args, **kwargs):
8142 step = long(kwargs.get('max_fetch_rows,',500)) 8143 write_colnames = kwargs['write_colnames'] = \ 8144 kwargs.get("write_colnames", True) 8145 for table in self.tables: 8146 ofile.write('TABLE %s\r\n' % table) 8147 query = self._adapter.id_query(self[table]) 8148 nrows = self(query).count() 8149 kwargs['write_colnames'] = write_colnames 8150 for k in range(0,nrows,step): 8151 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8152 ofile, *args, **kwargs) 8153 kwargs['write_colnames'] = False 8154 ofile.write('\r\n\r\n') 8155 ofile.write('END')
8156
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', map_tablenames=None,
                             ignore_missing_tables=False,
                             *args, **kwargs):
        """
        Read a multi-table CSV dump (as produced by export_to_csv_file)
        from ``ifile`` and insert the records table by table.

        ``map_tablenames`` maps dump table names to local table names;
        mapping a name to None skips that table's rows, as does
        ``ignore_missing_tables`` for unmapped missing targets.
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or \
                    not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                # remap the dump's table name to the local target (may be None)
                tablename = map_tablenames.get(tablename,tablename)
                if tablename is not None and tablename in self.tables:
                    # delegate the rows of this section to the Table importer
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset,
                        *args, **kwargs)
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8187
def DAL_unpickler(db_uid):
    """Unpickle helper: recreate a placeholder ('zombie') DAL bound to
    ``db_uid`` without opening a new connection."""
    zombie_uri = '<zombie>'
    return DAL(zombie_uri, db_uid=db_uid)
8191
def DAL_pickler(db):
    """Pickle helper: reduce a DAL instance to (DAL_unpickler, (db_uid,))."""
    reconstructor_args = (db._db_uid,)
    return DAL_unpickler, reconstructor_args

copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        # keep a reference to the table whose fields will be rendered
        self._table = table

    def __str__(self):
        field_names = [str(field) for field in self._table]
        return ', '.join(field_names)
8210
class Reference(long):
    """
    An integer (record id) subclass that lazily fetches and caches the
    referenced record (in self._record) on first attribute/item access.
    """

    def __allocate(self):
        # fetch the referenced record once and cache it
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # BUGFIX: accept a `default` argument so that get() below works;
        # previously get() called __getattr__(key, default) and raised
        # TypeError because __getattr__ only took `key`.
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            return self._record.get(key, default) # to deal with case self.update_record()
        else:
            return default

    def get(self, key, default=None):
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # private attributes go straight on the int; public ones on the record
        if key.startswith('_'):
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Unpickle helper: rebuild a reference id from its marshalled form."""
    value = marshal.loads(data)
    return value
8254
def Reference_pickler(data):
    # Pickle helper: reduce a Reference to its integer id, marshalled.
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        # fallback: hand-build the marshal 'i' (int32) record
        # NOTE(review): unclear which platform raises AttributeError from
        # marshal.dumps here — confirm before removing the fallback
        marshal_dump = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (marshal_dump,))

copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory that attaches the decorated function to a Table
    instance as a bound method (exposed as ``table.add_method``).

    Usage: ``@table.add_method()`` registers under the function's own
    name; ``@table.add_method.some_name`` registers under 'some_name'.
    """
    def __init__(self, table):
        self.table = table
    def __call__(self):
        # @table.add_method() form
        return self.register()
    def __getattr__(self, method_name):
        # @table.add_method.some_name form
        return self.register(method_name)
    def register(self, method_name=None):
        def _decorated(f):
            import types
            instance = self.table
            bound = types.MethodType(f, instance, instance.__class__)
            setattr(instance, method_name or f.func_name, bound)
            return f
        return _decorated
8280
8281 -class Table(object):
8282 8283 """ 8284 an instance of this class represents a database table 8285 8286 Example:: 8287 8288 db = DAL(...) 8289 db.define_table('users', Field('name')) 8290 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8291 db.users.drop() 8292 """ 8293
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._ot = args.get('actual_name')
        self._sequence_name = args.get('sequence_name') or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but for backward compatibility of appadmin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # insert/update/delete callback hooks
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        _primarykey = getattr(self, '_primarykey', None)
        if _primarykey is not None:
            # keyed table: no automatic 'id', the key fields are given
            if not isinstance(_primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(_primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==_primarykey[0]][0]
        elif not [f for f in fields if (isinstance(f,Field) and
                  f.type=='id') or (isinstance(f, dict) and
                  f.get("type", None)=="id")]:
            # no explicit id field supplied: create the implicit one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        def include_new(field):
            # accept a concrete field, tracking names and the id field
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type=='id':
                self._id = field
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                # virtual/computed fields are attached after the real ones
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another table: work on a copy
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
                include_new(Field(**field))
            elif isinstance(field, Table):
                # borrow all non-id fields from another table definition
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # adapters storing uploads in the db need a companion blob field
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        if (db and db.check_reserved):
            check_reserved = db.check_reserved_keyword
        else:
            # minimal check: only forbid names clashing with Table attributes
            def check_reserved(field_name):
                if field_name in reserved:
                    raise SyntaxError("field name %s not allowed" % field_name)
        for field in fields:
            field_name = field.name
            check_reserved(field_name)
            # duplicates are detected case-insensitively
            fn_lower = field_name.lower()
            if fn_lower in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                    % (field_name, tablename))
            else:
                lower_fieldnames.add(fn_lower)

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if _primarykey is not None:
            for k in _primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
    def fields(self):
        # Read-only accessor (decorated with @property above): returns the
        # SQLCallableList of field names built by define_table()/__init__.
        return self._fields
8436
8437 - def update(self,*args,**kwargs):
8438 raise RuntimeError("Syntax Not Supported")
8439
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  is_active = 'is_active',
                                  current_record = 'current_record',
                                  current_record_label = None):
        """
        Turn on record versioning for this table.

        Defines a companion archive table (``<tablename>_archive`` by
        default) holding a clone of every field plus a `current_record`
        reference back to the live row, and installs hooks:

        - _before_update: copies the about-to-change rows into the archive
          (via archive_record).
        - _before_delete (only if an `is_active` field exists): soft-delete,
          i.e. set is_active=False instead of physically removing rows.

        When archiving into a *different* database, reference fields are
        stored as plain 'bigint' since cross-db references cannot be kept.
        """
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # current_record references this table when archiving in the same db,
        # otherwise it degrades to a plain bigint id
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            # nfk: "no foreign key downgrade needed"
            nfk = same_db or not field.type.startswith('reference')
            clones.append(field.clone(
                unique=False, type=field.type if nfk else 'bigint'))
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type,label=current_record_label),
            *clones,**dict(format=self._format))

        # default arguments freeze the current values inside the lambda
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            # common filter: only show active rows of this table (and of any
            # alias of it) in every query
            newquery = lambda query, t=self, name=self._tablename: \
                reduce(AND,[db[tn].is_active == True
                            for tn in db._adapter.tables(query)
                            if tn==name or getattr(db[tn],'_ot',None)==name])
            query = self._common_filter
            if query:
                # NOTE(review): `query` is a Query while `newquery` is a
                # lambda here — combining them with `&` looks suspect;
                # confirm Query.__and__ handles a callable before relying
                # on a pre-existing common_filter with versioning enabled.
                newquery = query & newquery
            self._common_filter = newquery
8478
8479 - def _validate(self,**vars):
8480 errors = Row() 8481 for key,value in vars.iteritems(): 8482 value,error = self[key].validate(value) 8483 if error: 8484 errors[key] = error 8485 return errors
8486
8487 - def _create_references(self):
8488 db = self._db 8489 pr = db._pending_references 8490 self._referenced_by = [] 8491 self._references = [] 8492 for field in self: 8493 fieldname = field.name 8494 field_type = field.type 8495 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8496 ref = field_type[10:].strip() 8497 if not ref: 8498 SyntaxError('Table: reference to nothing: %s' %ref) 8499 if '.' in ref: 8500 rtablename, throw_it,rfieldname = ref.partition('.') 8501 else: 8502 rtablename, rfieldname = ref, None 8503 if not rtablename in db: 8504 pr[rtablename] = pr.get(rtablename,[]) + [field] 8505 continue 8506 rtable = db[rtablename] 8507 if rfieldname: 8508 if not hasattr(rtable,'_primarykey'): 8509 raise SyntaxError( 8510 'keyed tables can only reference other keyed tables (for now)') 8511 if rfieldname not in rtable.fields: 8512 raise SyntaxError( 8513 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8514 % (rfieldname, rtablename, self._tablename)) 8515 rfield = rtable[rfieldname] 8516 else: 8517 rfield = rtable._id 8518 rtable._referenced_by.append(field) 8519 field.referent = rfield 8520 self._references.append(field) 8521 else: 8522 field.referent = None 8523 for referee in pr.get(self._tablename,[]): 8524 self._referenced_by.append(referee)
8525
8526 - def _filter_fields(self, record, id=False):
8527 return dict([(k, v) for (k, v) in record.iteritems() if k 8528 in self.fields and (self[k].type!='id' or id)])
8529
8530 - def _build_query(self,key):
8531 """ for keyed table only """ 8532 query = None 8533 for k,v in key.iteritems(): 8534 if k in self._primarykey: 8535 if query: 8536 query = query & (self[k] == v) 8537 else: 8538 query = (self[k] == v) 8539 else: 8540 raise SyntaxError( 8541 'Field %s is not part of the primary key of %s' % \ 8542 (k,self._tablename)) 8543 return query
8544
    def __getitem__(self, key):
        """
        table[key] dispatch:

        - falsy key -> None
        - dict key  -> keyed-table lookup: primary-key query, first Row or None
        - numeric key (or, on GAE, a datastore Key) -> lookup by id
        - anything else -> attribute access (normally returns a Field)
        """
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first()
        # note precedence: isdigit() OR ('google' in DRIVERS AND isinstance(...))
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif key:
            return ogetattr(self, str(key))
8556
    def __call__(self, key=DEFAULT, **kwargs):
        """
        Record lookup shortcut:

        - table(id) / table(query): select the first matching row; any extra
          keyword args are then checked against the row (mismatch -> None).
        - table(field=value, ...): build an AND query from the kwargs and
          return the first match.
        - table() with no arguments -> None.

        Special keyword args (consumed, not matched against fields):
        _for_update passes for_update to select(); _orderby passes orderby.
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                # non-numeric, non-Query key cannot match an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # remaining kwargs act as an extra filter on the found row
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
                return record
            # no record found: falls through and returns None implicitly
        elif kwargs:
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8582
    def __setitem__(self, key, value):
        """
        table[key] = value dispatch:

        - dict key + dict value (keyed tables): insert, or update the row
          matching the primary-key dict if the insert fails; the key dict
          must cover the whole primary key.
        - numeric key: 0 inserts a new row; any other id updates that row
          (SyntaxError if it does not exist).
        - anything else: plain attribute assignment.
        """
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                # merge values and key; key wins so the primary key cannot
                # be overridden by the value dict
                kv = {}
                kv.update(value)
                kv.update(key)
                if not self.insert(**kv):
                    # row already exists: update it instead
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                # dict key requires a dict value (keyed-table form above)
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    # attribute access falls back to item access (table.name == table['name'])
    __getattr__ = __getitem__
8612 - def __setattr__(self, key, value):
8613 if key[:1]!='_' and key in self: 8614 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8615 osetattr(self,key,value)
8616
8617 - def __delitem__(self, key):
8618 if isinstance(key, dict): 8619 query = self._build_query(key) 8620 if not self._db(query).delete(): 8621 raise SyntaxError('No such record: %s' % key) 8622 elif not str(key).isdigit() or \ 8623 not self._db(self._id == key).delete(): 8624 raise SyntaxError('No such record: %s' % key)
8625
    def __contains__(self,key):
        # 'name in table' is attribute existence (fields are attributes)
        return hasattr(self,key)

    # dict-style alias for backward compatibility
    has_key = __contains__
    def items(self):
        # (attribute, value) pairs of the instance __dict__ (not just fields)
        return self.__dict__.items()
8633
8634 - def __iter__(self):
8635 for fieldname in self.fields: 8636 yield self[fieldname]
8637
    def iteritems(self):
        # iterator over (attribute, value) pairs of the instance __dict__
        return self.__dict__.iteritems()
8640 8641
8642 - def __repr__(self):
8643 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8644
8645 - def __str__(self):
8646 if self._ot is not None: 8647 ot = self._db._adapter.QUOTE_TEMPLATE % self._ot 8648 if 'Oracle' in str(type(self._db._adapter)): 8649 return '%s %s' % (ot, self._tablename) 8650 return '%s AS %s' % (ot, self._tablename) 8651 return self._tablename
8652
8653 - def _drop(self, mode = ''):
8654 return self._db._adapter._drop(self, mode)
8655
8656 - def drop(self, mode = ''):
8657 return self._db._adapter.drop(self,mode)
8658
    def _listify(self,fields,update=False):
        """
        Normalize an input dict of {fieldname: value} into the list of
        (Field, value) pairs the adapter expects.

        Applies filter_in, fills in defaults (insert) or update values
        (update), and evaluates computed fields.  Mutates `fields` in place
        when defaults/update values are added.

        :raises SyntaxError: for a name that is not a field of this table,
            or a required computed field that cannot be computed.
        :raises RuntimeError: for a missing required field on insert.
        """
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field,value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name,ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if this is an update, use the update value instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name,ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
8710
    def _attempt_upload(self, fields):
        """
        For every 'upload'-type field present in `fields`, store file-like
        values via Field.store() and replace them in `fields` with the
        resulting stored filename.  Plain strings and None pass through
        unchanged.  Mutates `fields` in place.

        :raises RuntimeError: if the value is neither a .file/.filename
            object (FieldStorage-like) nor a .read/.name object.
        """
        for field in self:
            if field.type=='upload' and field.name in fields:
                value = fields[field.name]
                if value is not None and not isinstance(value,str):
                    if hasattr(value,'file') and hasattr(value,'filename'):
                        new_name = field.store(value.file,filename=value.filename)
                    elif hasattr(value,'read') and hasattr(value,'name'):
                        new_name = field.store(value,filename=value.name)
                    else:
                        raise RuntimeError("Unable to handle upload")
                    fields[field.name] = new_name
8723
8724 - def _defaults(self, fields):
8725 "If there are no fields/values specified, return table defaults" 8726 if not fields: 8727 fields = {} 8728 for field in self: 8729 if field.type != "id": 8730 fields[field.name] = field.default 8731 return fields
8732
8733 - def _insert(self, **fields):
8734 fields = self._defaults(fields) 8735 return self._db._adapter._insert(self, self._listify(fields))
8736
    def insert(self, **fields):
        """
        Insert one record and return the adapter's result (the new id on
        SQL backends).

        Fills defaults, processes 'upload' values, then runs hooks:
        any _before_insert callback returning a truthy value vetoes the
        insert (returns 0); _after_insert callbacks are called with
        (Row(fields), ret) after a successful insert.
        """
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        if any(f(fields) for f in self._before_insert): return 0
        ret =  self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            [f(fields,ret) for f in self._after_insert]
        return ret
8746
8747 - def validate_and_insert(self,**fields):
8748 response = Row() 8749 response.errors = Row() 8750 new_fields = copy.copy(fields) 8751 for key,value in fields.iteritems(): 8752 value,error = self[key].validate(value) 8753 if error: 8754 response.errors[key] = "%s" % error 8755 else: 8756 new_fields[key] = value 8757 if not response.errors: 8758 response.id = self.insert(**new_fields) 8759 else: 8760 response.id = None 8761 return response
8762
8763 - def validate_and_update(self, _key=DEFAULT, **fields):
8764 response = Row() 8765 response.errors = Row() 8766 new_fields = copy.copy(fields) 8767 8768 for key,value in fields.iteritems(): 8769 value,error = self[key].validate(value) 8770 if error: 8771 response.errors[key] = "%s" % error 8772 else: 8773 new_fields[key] = value 8774 8775 if _key is DEFAULT: 8776 record = self(**values) 8777 elif isinstance(_key,dict): 8778 record = self(**_key) 8779 else: 8780 record = self(_key) 8781 8782 if not response.errors and record: 8783 row = self._db(self._id==_key) 8784 response.id = row.update(**fields) 8785 else: 8786 response.id = None 8787 return response
8788
8789 - def update_or_insert(self, _key=DEFAULT, **values):
8790 if _key is DEFAULT: 8791 record = self(**values) 8792 elif isinstance(_key,dict): 8793 record = self(**_key) 8794 else: 8795 record = self(_key) 8796 if record: 8797 record.update_record(**values) 8798 newid = None 8799 else: 8800 newid = self.insert(**values) 8801 return newid
8802
8803 - def bulk_insert(self, items):
8804 """ 8805 here items is a list of dictionaries 8806 """ 8807 items = [self._listify(item) for item in items] 8808 if any(f(item) for item in items for f in self._before_insert):return 0 8809 ret = self._db._adapter.bulk_insert(self,items) 8810 ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] 8811 return ret
8812
8813 - def _truncate(self, mode = None):
8814 return self._db._adapter._truncate(self, mode)
8815
8816 - def truncate(self, mode = None):
8817 return self._db._adapter.truncate(self, mode)
8818
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            # per-table sub-map: old csv id -> new db id
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell to the Python value for `field`;
            # returns (fieldname, converted_value)
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap every referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                # remap single reference through id_map (ignore unknown tables)
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                # shift single reference by the table's computed offset
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True if `colname` is this table's 'id'-type field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for lineno, line in enumerate(reader):
            if not line:
                break
            if not colnames:
                # assume this is the first line of the input, contains colnames
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    elif colname in self.fields:
                        cols.append((i,self[colname]))
                    if colname == unique:
                        unique_idx = i
            else:
                # every other line contains instead data
                items = []
                for i, field in cols:
                    try:
                        items.append(fix(field, line[i], id_map, id_offset))
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
                                           % (lineno+1,field,line[i]))

                if not (id_map or cid is None or id_offset is None or unique_idx):
                    # id-preserving restore: keep csv ids (plus offset)
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        id_offset[self._tablename] = (curr_id-csv_id) \
                            if curr_id>csv_id else 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    # remember old csv id -> new db id for later references
                    id_map_self[long(line[cid])] = new_id
8960 - def as_dict(self, flat=False, sanitize=True):
8961 table_as_dict = dict(tablename=str(self), fields=[], 8962 sequence_name=self._sequence_name, 8963 trigger_name=self._trigger_name, 8964 common_filter=self._common_filter, format=self._format, 8965 singular=self._singular, plural=self._plural) 8966 8967 for field in self: 8968 if (field.readable or field.writable) or (not sanitize): 8969 table_as_dict["fields"].append(field.as_dict( 8970 flat=flat, sanitize=sanitize)) 8971 return table_as_dict
8972
8973 - def as_xml(self, sanitize=True):
8974 if not have_serializers: 8975 raise ImportError("No xml serializers available") 8976 d = self.as_dict(flat=True, sanitize=sanitize) 8977 return serializers.xml(d)
8978
8979 - def as_json(self, sanitize=True):
8980 if not have_serializers: 8981 raise ImportError("No json serializers available") 8982 d = self.as_dict(flat=True, sanitize=sanitize) 8983 return serializers.json(d)
8984
8985 - def as_yaml(self, sanitize=True):
8986 if not have_serializers: 8987 raise ImportError("No YAML serializers available") 8988 d = self.as_dict(flat=True, sanitize=sanitize) 8989 return serializers.yaml(d)
8990
8991 - def with_alias(self, alias):
8992 return self._db._adapter.alias(self,alias)
8993
8994 - def on(self, query):
8995 return Expression(self._db,self._db._adapter.ON,self,query)
8996
def archive_record(qset, fs, archive_table, current_record):
    """
    _before_update hook used by record versioning: copy every row in `qset`
    into `archive_table`, recording the live row's id in the
    `current_record` column.  `fs` (the pending update values) is unused.
    Always returns False so the update itself still proceeds.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames) != 1:
        raise RuntimeError("cannot update join")
    table = qset.db[tablenames[0]]  # kept from original; result unused
    for row in qset.select():
        archived_fields = archive_table._filter_fields(row)
        archived_fields[current_record] = row.id
        archive_table.insert(**archived_fields)
    return False
9006
class Expression(object):
    """
    A node of a DAL expression tree.

    `op` is an adapter operation, applied to `first` (and optionally
    `second`); `type` is the resulting web2py field type, inherited from
    `first` when not given.  Fields, queries and computed SQL fragments are
    all built from Expression nodes; the adapter's expand() renders them
    to SQL (see __str__).
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # inherit the SQL type from the first operand when not explicit
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregates and scalar SQL functions ---

    def sum(self):
        "SQL SUM() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        "SQL MAX() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        "SQL MIN() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        "SQL length of this expression (integer result)."
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        "SQL AVG() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        "SQL ABS() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        "SQL LOWER() of this expression."
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        "SQL UPPER() of this expression."
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self,a,b):
        "SQL REPLACE: substitute substring `a` with `b`."
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)

    # --- date/time part extraction (integer results) ---

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        "SQL COALESCE(self, *others)."
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        "COALESCE with a zero fallback."
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        "Seconds-since-epoch representation of a datetime expression."
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        # SQL SUBSTRING with python-style (optionally negative) bounds;
        # positions are translated to SQL's 1-based (position, length) pair
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # single character access as a one-element slice
        return self[i:i + 1]

    def __str__(self):
        # render this expression to SQL via the adapter
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        # ~expression: descending order in sortby
        db = self.db
        # NOTE(review): '_op' is never set anywhere (only 'op' is), so
        # hasattr(self,'_op') is always False and this double-negation
        # shortcut is dead code — looks like a typo for 'op'; confirm
        # intent before changing, since fixing it alters built expressions.
        if hasattr(self,'_op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    # --- arithmetic operators (return Expression) ---

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        db = self.db
        # subtraction result type depends on the operand type
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparison operators (return Query) ---

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        "SQL LIKE (or the adapter's case-insensitive ILIKE by default)."
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        "Adapter-specific regular-expression match."
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value, **kwattr):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
               field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # nested select: IN (SELECT id FROM ...)
            value = db(value)._select(value.first._table._id)
        elif not isinstance(value, basestring):
            value = set(value)
            if kwattr.get('null') and None in value:
                # belongs(..., null=True): also match NULL explicitly
                value.remove(None)
                return (self == None) | Query(db, db._adapter.BELONGS, self, value)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        "Prefix match; only valid for text-like field types."
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        "Suffix match; only valid for text-like field types."
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameter is only useful for PostgreSQL.
        For other RDBMSs it is ignored and contains is always case
        in-sensitive.  For MongoDB and GAE contains is always case
        sensitive.  A list/tuple `value` is turned into AND-ed (all=True)
        or OR-ed sub-queries, one per non-empty element.
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json', 'upload') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        "SQL 'AS alias' for this expression."
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
9306
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        self.type = type
        self.native = native
        # encoder/decoder default to identity
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """
        True when the underlying web2py type string starts with `text`.

        BUG FIX: this previously called ``self.type.startswith(self, text)``,
        which passed the instance itself as the prefix, always raised
        TypeError and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            # e.g. text is None, or self.type is not a plain string
            return False

    def __getslice__(self, a=0, b=100):
        # slicing a custom type carries no meaning; mirror Field's API shape
        return None

    def __getitem__(self, i):
        # indexing a custom type carries no meaning
        return None

    def __str__(self):
        return self._class
9367
class FieldVirtual(object):
    """A computed (virtual) field: its value is produced by calling ``f``
    on each row instead of being stored in the database."""

    def __init__(self, name, f=None, ftype='string', label=None, table_name=None):
        # for backward compatibility the callable may be passed as the
        # single positional argument, in which case the name is unknown
        if f:
            self.name = name
            self.f = f
        else:
            self.name = 'unknown'
            self.f = name
        self.type = ftype
        self.label = label if label else self.name.capitalize().replace('_', ' ')
        self.represent = lambda value, row: value
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False  # virtual fields are never written back
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9385
class FieldMethod(object):
    """A method attached to a table's rows: ``f`` is called on demand,
    optionally wrapped by ``handler``."""

    def __init__(self, name, f=None, handler=None):
        # for backward compatibility the callable may be the single
        # positional argument, in which case the name is unknown
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.handler = handler
9391
def list_represent(x, r=None):
    """Default represent for list: fields: comma-separated string of items.

    ``r`` (the row) is accepted for the represent-callable signature but
    is unused; a None/empty list renders as the empty string.
    """
    return ', '.join(map(str, x or []))
9394
class Field(Expression):

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True, widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
            uploadfs=None # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    """
    # NOTE: the string above was previously placed after the class
    # attributes below, which made it a no-op statement rather than the
    # class docstring; it has been moved so it becomes Field.__doc__.

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod  # for backward compatibility

    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in=None,
        filter_out=None,
        custom_qualifier=None,
        map_none=None,
        ):
        # Expression-compatible attributes (Field doubles as an Expression)
        self._db = self.db = None  # both for backward compatibility
        self.op = None
        self.first = None
        self.second = None
        if isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError('Field: invalid unicode field name')
        self.name = fieldname = cleanup(fieldname)
        # reject names that would clash with Table attributes or Python syntax
        if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # a Table or Field passed as type means "reference <tablename>"
        self.type = type if not isinstance(type, (Table, Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type, 512)
        self.default = default if default != DEFAULT else (update or None)
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list: types get a comma-joined default representation
        self.represent = list_represent if \
            represent == None and type in ('list:integer', 'list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label != None else fieldname.replace('_', ' ').title()
        self.requires = requires if requires != None else []
        self.map_none = map_none

    def set_attributes(self, *args, **attributes):
        """Bulk-update the field's attributes (thin wrapper over __dict__.update)."""
        self.__dict__.update(*args, **attributes)

    def clone(self, point_self_references_to=False, **args):
        """Return a shallow copy of this field, optionally retargeting
        self-references to *point_self_references_to* and overriding
        attributes via keyword args.

        BUG FIX: the original compared against ``'reference %s'+field._tablename``
        (string concatenation, yielding ``'reference %stablename'``) instead of
        %-formatting, so self-references were never retargeted.
        """
        field = copy.copy(self)
        if point_self_references_to and \
                field.type == 'reference %s' % field._tablename:
            field.type = 'reference %s' % point_self_references_to
        field.__dict__.update(args)
        return field

    def store(self, file, filename=None, path=None):
        """Store an uploaded *file* and return the generated safe filename.

        Dispatches to custom_store if set; otherwise encodes the original
        filename into a collision-free name and writes the content either
        into a blob field, a pyfilesystem, or the uploads folder.
        """
        if self.custom_store:
            return self.custom_store(file, filename, path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # strip any directory components from the client-supplied name
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # original filename is b16-encoded so it can be recovered later
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # truncate so the stored name fits within the field length
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield, Field):
            # file content goes into a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys = {self_uploadfield.name: newfilename,
                    blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            # file content goes on disk (or a pyfilesystem)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(path, "%s.%s" % (self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename

    def retrieve(self, name, path=None, nameonly=False):
        """
        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        file_properties = self.retrieve_file_properties(name, path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str):  # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield, Field):
            # content lives in a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with the filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'], name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname, 'rb')
        return (filename, stream)

    def retrieve_file_properties(self, name, path=None):
        """Decode the stored upload *name* back into the original filename
        and the directory where the file resides (path=None when in DB)."""
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        if m.group('name'):
            try:
                # recover the original filename from its b16 encoding
                filename = base64.b16decode(m.group('name'), True)
                filename = REGEX_CLEANUP_FN.sub('_', filename)
            except (TypeError, AttributeError):
                filename = name
        else:
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None, filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            path = pjoin(path, "%s.%s" % (t, f), u[:2])
        return dict(path=path, filename=filename)

    def formatter(self, value):
        """Apply validators' formatters (in reverse order) to *value* for display."""
        requires = self.requires
        if value is None or not requires:
            return value or self.map_none
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        elif isinstance(requires, tuple):
            requires = list(requires)
        else:
            requires = copy.copy(requires)
        # formatters run in the opposite order of validation
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def validate(self, value):
        """Run all validators on *value*; return (value, error) where error
        is None on success. map_none values are normalized to None."""
        if not self.requires or self.requires == DEFAULT:
            return ((value if value != self.map_none else None), None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return ((value if value != self.map_none else None), None)

    def count(self, distinct=None):
        """Return a COUNT(...) Expression over this field."""
        return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')

    def as_dict(self, flat=False, sanitize=True):
        """Return the field's attributes as a dict; with flat=True,
        non-serializable values are flattened/stringified/dropped.
        With sanitize=True an unreadable+unwritable field yields {}."""
        attrs = ("name", 'authorize', 'represent', 'ondelete',
                 'custom_store', 'autodelete', 'custom_retrieve',
                 'filter_out', 'uploadseparate', 'widget', 'uploadfs',
                 'update', 'custom_delete', 'uploadfield', 'uploadfolder',
                 'custom_qualifier', 'unique', 'writable', 'compute',
                 'map_none', 'default', 'type', 'required', 'readable',
                 'requires', 'comment', 'label', 'length', 'notnull',
                 'custom_retrieve_file_properties', 'filter_in')
        serializable = (int, long, basestring, float, tuple,
                        bool, type(None))

        def flatten(obj):
            # recursively reduce obj to json-friendly primitives
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in
                            obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime,
                                  datetime.date, datetime.time)):
                return str(obj)
            else:
                return None

        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
            d["fieldname"] = d.pop("name")
        return d

    def as_xml(self, sanitize=True):
        """Serialize as_dict(flat=True) to XML (requires gluon serializers)."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """Serialize as_dict(flat=True) to JSON (requires gluon serializers)."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def as_yaml(self, sanitize=True):
        """Serialize as_dict(flat=True) to YAML (requires gluon serializers)."""
        if have_serializers:
            d = self.as_dict(flat=True, sanitize=sanitize)
            return serializers.yaml(d)
        else:
            raise ImportError("No YAML serializers available")

    def __nonzero__(self):
        # a Field is always truthy, even when its Expression parts are None
        return True

    def __str__(self):
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            # field not yet bound to a table
            return '<no table>.%s' % self.name
9742
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # op is an adapter method (e.g. db._adapter.EQ); first/second are
        # the operands (Field/Expression/Query/constant)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # expand through BaseAdapter so repr works for any adapter subclass
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        # the SQL (or adapter-specific) text of this query
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # query1 & query2 --> AND query
        return Query(self.db,self.db._adapter.AND,self,other)

    __rand__ = __and__

    def __or__(self, other):
        # query1 | query2 --> OR query
        return Query(self.db,self.db._adapter.OR,self,other)

    __ror__ = __or__

    def __invert__(self):
        # ~~query collapses back to the inner query
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # structural equality via the expanded representation
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # CASE WHEN <self> THEN t ELSE f END
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively serialize a Query/Expression __dict__:
            # operands become nested dicts or {tablename, fieldname} refs,
            # ops become their name, everything non-serializable is dropped
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize as_dict(flat=True) to XML via gluon serializers
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize as_dict(flat=True) to JSON via gluon serializers
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9870
def xorify(orderby):
    """Fold a sequence of orderby expressions into one with `|`.

    Returns None for an empty/None sequence; otherwise the left-to-right
    `|`-combination of all items.
    """
    if not orderby:
        return None
    combined = orderby[0]
    for expression in orderby[1:]:
        combined = combined | expression
    return combined
9878
def use_common_filters(query):
    """True when *query* is a Query that has not opted out of common filters.

    Mirrors the original short-circuit exactly: a falsy query is returned
    as-is, a query without the attribute yields False.
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
9882
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # copy the query before toggling ignore_common_filters so the
        # caller's Query object is not mutated
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        # build a subset: normalize Table/str/Field arguments to a Query,
        # then AND it with the current query (if any)
        if query is None:
            return self
        elif isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL text for count() without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL text for select() without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL text for delete() without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL text for update() without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        # flat=True: plain dict with the query and (if not sanitized)
        # enough db info to reconstruct the set remotely
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize as_dict(flat=True) to XML via gluon serializers
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize as_dict(flat=True) to JSON via gluon serializers
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        # d is {"op": ..., "first": ..., "second": ...}; recursively
        # reconstruct the Query/Expression tree
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            # resolve operands: nested op-dicts recurse, field refs are
            # looked up on the db
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # limitby=(0,1) so only one row is fetched to decide emptiness
        return not self.select(limitby=(0,1), orderby_on_limitby=False)

    def count(self,distinct=None, cache=None):
        # cache is an optional (cache_model, time_expire) pair; long cache
        # keys are md5-hashed to stay within key-length limits
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                  db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        # expand wildcard/implicit fields over all tables involved in the
        # query/join/orderby/groupby, then execute
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # wrap the select SQL as an Expression usable inside another query
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        # runs _before_delete callbacks (abort returning 0 if any is truthy)
        # and _after_delete callbacks on success
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        # runs _before_update callbacks (abort returning 0 if any is truthy)
        # and _after_update callbacks on success
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        # validate each field first; only update when there are no errors.
        # Returns a Row with .errors and .updated
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        # remove files referenced by upload fields of the records in this
        # set (only autodelete fields stored on disk, not in DB)
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                # skip files that are being kept by the pending update
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10173
class RecordUpdater(object):
    """Callable bound to a record: updates the record (bypassing common
    filters) and keeps the cached column set in sync."""

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        table = self.db[self.tablename]
        # with no explicit fields, re-save the cached column values
        values = fields or dict(self.colset)
        # drop anything that is not a real, writable column
        for key in list(values):
            if key not in table.fields or table[key].type == 'id':
                del values[key]
        table._db(table._id == self.id, ignore_common_filters=True).update(**values)
        self.colset.update(values)
        return self.colset
10189
class RecordDeleter(object):
    """Callable bound to a record: deletes that record when invoked."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        table = self.db[self.tablename]
        return self.db(table._id == self.id).delete()
10195
class LazyReferenceGetter(object):
    """For a record of a lazy table, resolves the set of rows in another
    table that reference it (raises AttributeError when unresolvable)."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        # only meaningful when lazy tables are enabled on this DAL
        if self.db._lazy_tables is False:
            raise AttributeError()
        other_table = self.db[other_tablename]
        for rfield in self.db[self.tablename]._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)
        raise AttributeError()
10209
class LazySet(object):
    # A Set built on demand from "field == id"; every operation delegates
    # to a freshly constructed Set so the query reflects current state.

    def __init__(self, field, id):
        self.db, self.tablename, self.fieldname, self.id = \
            field.db, field._tablename, field.name, id

    def _getset(self):
        # build the underlying Set lazily
        query = self.db[self.tablename][self.fieldname]==self.id
        return Set(self.db,query)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        # subset, as with Set.__call__
        return self._getset()(query, ignore_common_filters)

    def _count(self,distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields,**attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self,distinct=None, cache=None):
        return self._getset().count(distinct,cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields,**attributes)

    def nested_select(self,*fields,**attributes):
        return self._getset().nested_select(*fields,**attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10247
class VirtualCommand(object):
    """Bind a lazy virtual-field method to one row; calling the instance
    invokes the method with that row as its first argument."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        return self.method(self.row, *args, **kwargs)
10254
def lazy_virtualfield(f):
    """Decorator marking a virtual-field method as lazy: it will be wrapped
    in a VirtualCommand instead of being evaluated at select time."""
    setattr(f, '__lazy__', True)
    return f
10258
10259 -class Rows(object):
10260 10261 """ 10262 A wrapper for the return value of a select. It basically represents a table. 10263 It has an iterator and each row is represented as a dictionary. 10264 """ 10265 10266 # ## TODO: this class still needs some work to care for ID/OID 10267
10268 - def __init__( 10269 self, 10270 db=None, 10271 records=[], 10272 colnames=[], 10273 compact=True, 10274 rawrows=None 10275 ):
10276 self.db = db 10277 self.records = records 10278 self.colnames = colnames 10279 self.compact = compact 10280 self.response = rawrows
10281
10282 - def __repr__(self):
10283 return '<Rows (%s)>' % len(self.records)
10284
    def setvirtualfields(self,**keyed_virtualfields):
        """
        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                # box is the per-table sub-Row that receives the computed values
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy fields are deferred via VirtualCommand
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # legacy style: inject row data into the
                            # virtualfields instance once, then evaluate
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self
10324
10325 - def __and__(self,other):
10326 if self.colnames!=other.colnames: 10327 raise Exception('Cannot & incompatible Rows objects') 10328 records = self.records+other.records 10329 return Rows(self.db,records,self.colnames)
10330
10331 - def __or__(self,other):
10332 if self.colnames!=other.colnames: 10333 raise Exception('Cannot | incompatible Rows objects') 10334 records = self.records 10335 records += [record for record in other.records \ 10336 if not record in records] 10337 return Rows(self.db,records,self.colnames)
10338
10339 - def __nonzero__(self):
10340 if len(self.records): 10341 return 1 10342 return 0
10343
10344 - def __len__(self):
10345 return len(self.records)
10346
10347 - def __getslice__(self, a, b):
10348 return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)
10349
10350 - def __getitem__(self, i):
10351 row = self.records[i] 10352 keys = row.keys() 10353 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10354 return row[row.keys()[0]] 10355 return row
10356
10357 - def __iter__(self):
10358 """ 10359 iterator over records 10360 """ 10361 10362 for i in xrange(len(self)): 10363 yield self[i]
10364
10365 - def __str__(self):
10366 """ 10367 serializes the table into a csv file 10368 """ 10369 10370 s = StringIO.StringIO() 10371 self.export_to_csv_file(s) 10372 return s.getvalue()
10373
10374 - def first(self):
10375 if not self.records: 10376 return None 10377 return self[0]
10378
10379 - def last(self):
10380 if not self.records: 10381 return None 10382 return self[-1]
10383
10384 - def find(self,f,limitby=None):
10385 """ 10386 returns a new Rows object, a subset of the original object, 10387 filtered by the function f 10388 """ 10389 if not self: 10390 return Rows(self.db, [], self.colnames) 10391 records = [] 10392 if limitby: 10393 a,b = limitby 10394 else: 10395 a,b = 0,len(self) 10396 k = 0 10397 for row in self: 10398 if f(row): 10399 if a<=k: records.append(row) 10400 k += 1 10401 if k==b: break 10402 return Rows(self.db, records, self.colnames)
10403
10404 - def exclude(self, f):
10405 """ 10406 removes elements from the calling Rows object, filtered by the function f, 10407 and returns a new Rows object containing the removed elements 10408 """ 10409 if not self.records: 10410 return Rows(self.db, [], self.colnames) 10411 removed = [] 10412 i=0 10413 while i<len(self): 10414 row = self[i] 10415 if f(row): 10416 removed.append(self.records[i]) 10417 del self.records[i] 10418 else: 10419 i += 1 10420 return Rows(self.db, removed, self.colnames)
10421
10422 - def sort(self, f, reverse=False):
10423 """ 10424 returns a list of sorted elements (not sorted in place) 10425 """ 10426 rows = Rows(self.db,[],self.colnames,compact=False) 10427 rows.records = sorted(self,key=f,reverse=reverse) 10428 return rows
10429
    def group_by_value(self, *fields, **args):
        """
        regroups the rows, by one of the fields

        Builds a nested dict keyed by the given field values, one level
        per field. With one_result=True each leaf is a single row;
        otherwise each leaf is a list of rows. Returns self when no
        fields are given, and {} when there are no records.
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function:
            recursively files `row` into `groups`, descending one dict
            level per field; returns the row/[row] leaf at the deepest
            level, and the (mutated) groups dict otherwise
            '''
            if num > len(fields)-1:
                # past the last grouping field: produce the leaf value
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num+1, {})
            else:
                struct = build_fields_struct(row, fields, num+1, groups[ value ])

                # still have more grouping to do
                if type(struct) == type(dict()):
                    # NOTE(review): .update() with no arguments is a no-op;
                    # groups[value] was already mutated in place by the
                    # recursive call above, so the result is still correct
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group
10481
10482 - def render(self, i=None, fields=None):
10483 """ 10484 Takes an index and returns a copy of the indexed row with values 10485 transformed via the "represent" attributes of the associated fields. 10486 10487 If no index is specified, a generator is returned for iteration 10488 over all the rows. 10489 10490 fields -- a list of fields to transform (if None, all fields with 10491 "represent" attributes will be transformed). 10492 """ 10493 10494 10495 if i is None: 10496 return (self.repr(i, fields=fields) for i in range(len(self))) 10497 import sqlhtml 10498 row = copy.deepcopy(self.records[i]) 10499 keys = row.keys() 10500 tables = [f.tablename for f in fields] if fields \ 10501 else [k for k in keys if k != '_extra'] 10502 for table in tables: 10503 repr_fields = [f.name for f in fields if f.tablename == table] \ 10504 if fields else [k for k in row[table].keys() 10505 if (hasattr(self.db[table], k) and 10506 isinstance(self.db[table][k], Field) 10507 and self.db[table][k].represent)] 10508 for field in repr_fields: 10509 row[table][field] = sqlhtml.represent( 10510 self.db[table][field], row[table][field], row[table]) 10511 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10512 return row[keys[0]] 10513 return row
10514
10515 - def as_list(self, 10516 compact=True, 10517 storage_to_dict=True, 10518 datetime_to_str=False, 10519 custom_types=None):
10520 """ 10521 returns the data as a list or dictionary. 10522 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10523 :param datetime_to_str: convert datetime fields as strings (default False) 10524 """ 10525 (oc, self.compact) = (self.compact, compact) 10526 if storage_to_dict: 10527 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10528 else: 10529 items = [item for item in self] 10530 self.compact = compact 10531 return items
10532 10533
    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id;
            may also be 'table.field' or a callable computing the key from a row
        :param compact: compact setting forwarded to as_list (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            # a value of the same class as the row itself means the row nests
            # per-table sub-rows (multi-table select)
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    # infinite counter 0, 1, 2, ... used as surrogate keys
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                # NOTE(review): generator.next() is Python-2-only
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key,str) and key.count('.')==1:
            # 'table.field' form: index by that field of the named table
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            # plain field name
            return dict([(r[key],r) for r in rows])
        else:
            # callable key: computed per row
            return dict([(key(r),r) for r in rows])
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
        :param write_colnames: write the header line (default True)
        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                # references are exported as their integer id
                return long(value)
            elif hasattr(value, 'isoformat'):
                # date/datetime/time values: 'YYYY-MM-DD HH:MM:SS'
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                # columns not matching 'table.field' come from _extra
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    # the record may or may not be nested per table
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
10636 - def xml(self,strict=False,row_name='row',rows_name='rows'):
10637 """ 10638 serializes the table using sqlhtml.SQLTABLE (if present) 10639 """ 10640 10641 if strict: 10642 ncols = len(self.colnames) 10643 return '<%s>\n%s\n</%s>' % (rows_name, 10644 '\n'.join(row.as_xml(row_name=row_name, 10645 colnames=self.colnames) for 10646 row in self), rows_name) 10647 10648 import sqlhtml 10649 return sqlhtml.SQLTABLE(self).xml()
10650
10651 - def as_xml(self,row_name='row',rows_name='rows'):
10652 return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
10653
10654 - def as_json(self, mode='object', default=None):
10655 """ 10656 serializes the rows to a JSON list or object with objects 10657 mode='object' is not implemented (should return a nested 10658 object structure) 10659 """ 10660 10661 items = [record.as_json(mode=mode, default=default, 10662 serialize=False, 10663 colnames=self.colnames) for 10664 record in self] 10665 10666 if have_serializers: 10667 return serializers.json(items, 10668 default=default or 10669 serializers.custom_json) 10670 elif simplejson: 10671 return simplejson.dumps(items) 10672 else: 10673 raise RuntimeError("missing simplejson")
10674 10675 # for consistent naming yet backwards compatible 10676 as_csv = __str__ 10677 json = as_json 10678
################################################################################
# dummy function used to define some doctests
################################################################################

# NOTE(review): this function exists only to host the doctests below; the
# examples use Python 2 syntax (print statements, 01-style int literals).
def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

    Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                        uploadf=None, integerf=5, doublef=3.14,\
                        jsonf={"j": True},\
                        datef=datetime.date(2001, 1, 1),\
                        timef=datetime.time(12, 30, 15),\
                        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10887 ################################################################################ 10888 # deprecated since the new DAL; here only for backward compatibility 10889 ################################################################################ 10890 10891 SQLField = Field 10892 SQLTable = Table 10893 SQLXorable = Expression 10894 SQLQuery = Query 10895 SQLSet = Set 10896 SQLRows = Rows 10897 SQLStorage = Row 10898 SQLDB = DAL 10899 GQLDB = DAL 10900 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10901 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return the WKT (well-known text) representation of a 2D point."""
    coords = (x, y)
    return "POINT (%f %f)" % coords
10909
def geoLine(*line):
    """Return a WKT LINESTRING built from (x, y) coordinate pairs."""
    vertices = ["%f %f" % vertex for vertex in line]
    return "LINESTRING (%s)" % ','.join(vertices)
10912
def geoPolygon(*line):
    """Return a WKT POLYGON (single ring) from (x, y) coordinate pairs."""
    ring = ','.join("%f %f" % vertex for vertex in line)
    return "POLYGON ((%s))" % ring

################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    import doctest
    doctest.testmod()
10915 10916 ################################################################################ 10917 # run tests 10918 ################################################################################ 10919 10920 if __name__ == '__main__': 10921 import doctest 10922 doctest.testmod() 10923